From cc6dd2fcc390433369ce6bade475ec3379dd01da Mon Sep 17 00:00:00 2001 From: Gilles Miraillet Date: Thu, 30 May 2024 09:55:23 +0200 Subject: [PATCH] Adds possibility to call BBS to retrieve stats for application's processes states. --- collectors/applications.go | 16 +- collectors/collectors.go | 12 +- fetcher/bbs_client.go | 62 + fetcher/fetcher.go | 23 +- fetcher/fetcher_handlers.go | 78 +- fetcher/fetcher_test.go | 2 +- fetcher/worker.go | 10 +- filters/filters.go | 5 +- go.mod | 25 +- go.sum | 77 +- main.go | 42 +- models/model.go | 4 + vendor/code.cloudfoundry.org/bbs/.gitignore | 3 + vendor/code.cloudfoundry.org/bbs/CODEOWNERS | 1 + vendor/code.cloudfoundry.org/bbs/LICENSE | 201 + vendor/code.cloudfoundry.org/bbs/NOTICE | 18 + vendor/code.cloudfoundry.org/bbs/README.md | 83 + vendor/code.cloudfoundry.org/bbs/client.go | 1019 +++ .../bbs/encryption/crypt.go | 76 + .../bbs/encryption/encryption_config.go | 42 + .../bbs/encryption/key.go | 49 + .../bbs/encryption/key_manager.go | 39 + .../bbs/encryption/package.go | 1 + .../bbs/events/event_source.go | 269 + .../code.cloudfoundry.org/bbs/events/hub.go | 200 + .../bbs/events/package.go | 1 + .../bbs/format/encoding.go | 102 + .../bbs/format/envelope.go | 61 + .../bbs/format/format.go | 38 + .../bbs/format/package.go | 1 + .../bbs/format/versioner.go | 16 + .../bbs/models/actions.go | 592 ++ .../bbs/models/actions.pb.go | 5076 ++++++++++++ .../bbs/models/actions.proto | 102 + .../bbs/models/actual_lrp.go | 521 ++ .../bbs/models/actual_lrp.pb.go | 3220 ++++++++ .../bbs/models/actual_lrp.proto | 75 + .../bbs/models/actual_lrp_requests.go | 346 + .../bbs/models/actual_lrp_requests.pb.go | 4872 +++++++++++ .../bbs/models/actual_lrp_requests.proto | 94 + .../bbs/models/bbs_presence.go | 38 + .../bbs/models/cached_dependency.go | 59 + .../bbs/models/cached_dependency.pb.go | 723 ++ .../bbs/models/cached_dependency.proto | 17 + .../bbs/models/cell_presence.go | 153 + .../bbs/models/cells.pb.go | 1703 ++++ .../bbs/models/cells.proto | 33 + .../bbs/models/certificate_properties.pb.go | 385 + .../bbs/models/certificate_properties.proto | 8 + .../bbs/models/check_definition.go | 55 + .../bbs/models/check_definition.pb.go | 1453 ++++ .../bbs/models/check_definition.proto | 33 + .../bbs/models/desired_lrp.go | 785 ++ .../bbs/models/desired_lrp.pb.go | 7240 +++++++++++++++++ .../bbs/models/desired_lrp.proto | 150 + .../bbs/models/desired_lrp_requests.go | 69 + .../bbs/models/desired_lrp_requests.pb.go | 2806 +++++++ .../bbs/models/desired_lrp_requests.proto | 53 + .../bbs/models/domain.pb.go | 853 ++ .../bbs/models/domain.proto | 22 + .../bbs/models/domains.go | 36 + .../bbs/models/environment_variables.go | 10 + .../bbs/models/environment_variables.pb.go | 436 + .../bbs/models/environment_variables.proto | 10 + .../bbs/models/error.pb.go | 515 ++ .../bbs/models/error.proto | 52 + .../bbs/models/errors.go | 186 + .../bbs/models/evacuation.go | 12 + .../bbs/models/evacuation.pb.go | 2503 ++++++ .../bbs/models/evacuation.proto | 53 + .../bbs/models/events.go | 329 + .../bbs/models/events.pb.go | 4977 +++++++++++ .../bbs/models/events.proto | 99 + .../bbs/models/image_layer.go | 228 + .../bbs/models/image_layer.pb.go | 788 ++ .../bbs/models/image_layer.proto | 34 + .../code.cloudfoundry.org/bbs/models/json.go | 54 + .../bbs/models/log_rate_limit.pb.go | 360 + .../bbs/models/log_rate_limit.proto | 7 + .../bbs/models/lrp_convergence.go | 6 + .../bbs/models/metric_tags.go | 101 + .../bbs/models/metric_tags.pb.go | 460 ++ .../bbs/models/metric_tags.proto | 18 
+ .../bbs/models/models.go | 8 + .../bbs/models/modification_tag.go | 20 + .../bbs/models/modification_tag.pb.go | 419 + .../bbs/models/modification_tag.proto | 11 + .../bbs/models/network.pb.go | 522 ++ .../bbs/models/network.proto | 11 + .../bbs/models/package.go | 1 + .../bbs/models/ping.pb.go | 368 + .../bbs/models/ping.proto | 9 + .../bbs/models/restart_calculator.go | 85 + .../bbs/models/routes.go | 77 + .../bbs/models/security_group.pb.go | 1282 +++ .../bbs/models/security_group.proto | 25 + .../bbs/models/security_groups.go | 157 + .../bbs/models/sidecar.go | 32 + .../bbs/models/sidecar.pb.go | 472 ++ .../bbs/models/sidecar.proto | 13 + .../code.cloudfoundry.org/bbs/models/task.go | 206 + .../bbs/models/task.pb.go | 3018 +++++++ .../bbs/models/task.proto | 76 + .../bbs/models/task_requests.go | 125 + .../bbs/models/task_requests.pb.go | 4016 +++++++++ .../bbs/models/task_requests.proto | 78 + .../bbs/models/validator.go | 58 + .../bbs/models/version.go | 5 + .../bbs/models/volume_mount.go | 34 + .../bbs/models/volume_mount.pb.go | 1061 +++ .../bbs/models/volume_mount.proto | 28 + vendor/code.cloudfoundry.org/bbs/package.go | 1 + vendor/code.cloudfoundry.org/bbs/routes.go | 162 + .../bbs/trace/request_id.go | 53 + .../cfhttp/v2/.gitignore | 1 + .../cfhttp/v2/CODEOWNERS | 1 + .../code.cloudfoundry.org/cfhttp/v2/LICENSE | 201 + vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE | 20 + .../code.cloudfoundry.org/cfhttp/v2/README.md | 30 + .../code.cloudfoundry.org/cfhttp/v2/client.go | 129 + .../cfhttp/v2/package.go | 1 + .../cfhttp/v2/staticcheck.conf | 1 + .../code.cloudfoundry.org/lager/v3/.gitignore | 38 + .../code.cloudfoundry.org/lager/v3/CODEOWNERS | 1 + vendor/code.cloudfoundry.org/lager/v3/LICENSE | 201 + vendor/code.cloudfoundry.org/lager/v3/NOTICE | 20 + .../code.cloudfoundry.org/lager/v3/README.md | 102 + .../code.cloudfoundry.org/lager/v3/handler.go | 162 + .../lager/v3/internal/truncate/package.go | 1 + .../lager/v3/internal/truncate/truncate.go | 174 + .../lager/v3/json_redacter.go | 115 + .../code.cloudfoundry.org/lager/v3/logger.go | 217 + .../code.cloudfoundry.org/lager/v3/models.go | 151 + .../lager/v3/reconfigurable_sink.go | 37 + .../lager/v3/redacting_sink.go | 62 + .../lager/v3/slog_sink.go | 63 + .../code.cloudfoundry.org/lager/v3/tools.go | 8 + .../lager/v3/truncating_sink.go | 32 + .../lager/v3/writer_sink.go | 66 + vendor/github.com/go-logr/logr/README.md | 73 +- vendor/github.com/go-logr/logr/context.go | 33 + .../github.com/go-logr/logr/context_noslog.go | 49 + .../github.com/go-logr/logr/context_slog.go | 83 + vendor/github.com/go-logr/logr/logr.go | 43 - .../go-logr/logr/{slogr => }/sloghandler.go | 98 +- vendor/github.com/go-logr/logr/slogr.go | 100 + vendor/github.com/go-logr/logr/slogr/slogr.go | 77 +- .../go-logr/logr/{slogr => }/slogsink.go | 24 +- .../go-task/slim-sprig/v3/.editorconfig | 14 + .../go-task/slim-sprig/v3/.gitattributes | 1 + .../go-task/slim-sprig/v3/.gitignore | 2 + .../go-task/slim-sprig/v3/CHANGELOG.md | 383 + .../go-task/slim-sprig/v3/LICENSE.txt | 19 + .../go-task/slim-sprig/v3/README.md | 73 + .../go-task/slim-sprig/v3/Taskfile.yml | 12 + .../go-task/slim-sprig/v3/crypto.go | 24 + .../github.com/go-task/slim-sprig/v3/date.go | 152 + .../go-task/slim-sprig/v3/defaults.go | 163 + .../github.com/go-task/slim-sprig/v3/dict.go | 118 + .../github.com/go-task/slim-sprig/v3/doc.go | 19 + .../go-task/slim-sprig/v3/functions.go | 317 + .../github.com/go-task/slim-sprig/v3/list.go | 464 ++ .../go-task/slim-sprig/v3/network.go | 12 + 
.../go-task/slim-sprig/v3/numeric.go | 228 + .../go-task/slim-sprig/v3/reflect.go | 28 + .../github.com/go-task/slim-sprig/v3/regex.go | 83 + .../go-task/slim-sprig/v3/strings.go | 189 + .../github.com/go-task/slim-sprig/v3/url.go | 66 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 ++ .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 +++++++ .../descriptor/descriptor_gostring.gen.go | 752 ++ .../protoc-gen-gogo/descriptor/helper.go | 390 + vendor/github.com/google/pprof/AUTHORS | 7 + vendor/github.com/google/pprof/CONTRIBUTORS | 16 + vendor/github.com/google/pprof/LICENSE | 202 + .../github.com/google/pprof/profile/encode.go | 591 ++ .../github.com/google/pprof/profile/filter.go | 274 + .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 + .../google/pprof/profile/legacy_profile.go | 1228 +++ .../github.com/google/pprof/profile/merge.go | 669 ++ .../google/pprof/profile/profile.go | 864 ++ .../github.com/google/pprof/profile/proto.go | 367 + .../github.com/google/pprof/profile/prune.go | 194 + vendor/github.com/onsi/ginkgo/v2/LICENSE | 20 + .../onsi/ginkgo/v2/config/deprecated.go | 69 + .../ginkgo/v2/formatter/colorable_others.go | 41 + .../ginkgo/v2/formatter/colorable_windows.go | 809 ++ .../onsi/ginkgo/v2/formatter/formatter.go | 230 + .../ginkgo/v2/ginkgo/build/build_command.go | 63 + .../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/command.go | 50 + .../onsi/ginkgo/v2/ginkgo/command/program.go | 182 + .../ginkgo/generators/boostrap_templates.go | 48 + .../v2/ginkgo/generators/bootstrap_command.go | 133 + .../v2/ginkgo/generators/generate_command.go | 265 + .../ginkgo/generators/generate_templates.go | 43 + .../v2/ginkgo/generators/generators_common.go | 76 + .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 161 + .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 129 + .../ginkgo/internal/profiles_and_reports.go | 227 + .../onsi/ginkgo/v2/ginkgo/internal/run.go | 355 + .../ginkgo/v2/ginkgo/internal/test_suite.go | 284 + .../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 + .../v2/ginkgo/internal/verify_version.go | 54 + .../ginkgo/v2/ginkgo/labels/labels_command.go | 123 + .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 301 + .../onsi/ginkgo/v2/ginkgo/outline/import.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 110 + .../v2/ginkgo/outline/outline_command.go | 98 + .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 232 + .../v2/ginkgo/unfocus/unfocus_command.go | 186 + .../onsi/ginkgo/v2/ginkgo/watch/delta.go | 22 + .../ginkgo/v2/ginkgo/watch/delta_tracker.go | 75 + .../ginkgo/v2/ginkgo/watch/dependencies.go | 92 + .../ginkgo/v2/ginkgo/watch/package_hash.go | 117 + .../ginkgo/v2/ginkgo/watch/package_hashes.go | 85 + .../onsi/ginkgo/v2/ginkgo/watch/suite.go | 87 + .../ginkgo/v2/ginkgo/watch/watch_command.go | 192 + .../interrupt_handler/interrupt_handler.go | 177 + .../sigquit_swallower_unix.go | 15 + .../sigquit_swallower_windows.go | 8 + .../parallel_support/client_server.go | 72 + .../internal/parallel_support/http_client.go | 169 + .../internal/parallel_support/http_server.go | 242 + .../internal/parallel_support/rpc_client.go | 136 
+ .../internal/parallel_support/rpc_server.go | 75 + .../parallel_support/server_handler.go | 234 + .../ginkgo/v2/reporters/default_reporter.go | 780 ++ .../v2/reporters/deprecated_reporter.go | 149 + .../onsi/ginkgo/v2/reporters/json_report.go | 69 + .../onsi/ginkgo/v2/reporters/junit_report.go | 389 + .../onsi/ginkgo/v2/reporters/reporter.go | 29 + .../ginkgo/v2/reporters/teamcity_report.go | 105 + .../onsi/ginkgo/v2/types/code_location.go | 159 + .../github.com/onsi/ginkgo/v2/types/config.go | 761 ++ .../onsi/ginkgo/v2/types/deprecated_types.go | 141 + .../ginkgo/v2/types/deprecation_support.go | 177 + .../onsi/ginkgo/v2/types/enum_support.go | 43 + .../github.com/onsi/ginkgo/v2/types/errors.go | 639 ++ .../onsi/ginkgo/v2/types/file_filter.go | 106 + .../github.com/onsi/ginkgo/v2/types/flags.go | 490 ++ .../onsi/ginkgo/v2/types/label_filter.go | 358 + .../onsi/ginkgo/v2/types/report_entry.go | 190 + .../github.com/onsi/ginkgo/v2/types/types.go | 914 +++ .../onsi/ginkgo/v2/types/version.go | 3 + vendor/github.com/onsi/gomega/CHANGELOG.md | 35 + .../github.com/onsi/gomega/ghttp/handlers.go | 23 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- vendor/github.com/onsi/gomega/matchers.go | 15 +- .../matchers/be_comparable_to_matcher.go | 4 +- .../onsi/gomega/matchers/receive_matcher.go | 70 +- .../github.com/openzipkin/zipkin-go/LICENSE | 201 + .../zipkin-go/idgenerator/idgenerator.go | 130 + .../openzipkin/zipkin-go/model/annotation.go | 60 + .../openzipkin/zipkin-go/model/doc.go | 23 + .../openzipkin/zipkin-go/model/endpoint.go | 50 + .../openzipkin/zipkin-go/model/kind.go | 27 + .../openzipkin/zipkin-go/model/span.go | 161 + .../openzipkin/zipkin-go/model/span_id.go | 44 + .../openzipkin/zipkin-go/model/traceid.go | 75 + vendor/github.com/vito/go-sse/LICENSE.md | 201 + vendor/github.com/vito/go-sse/sse/errors.go | 5 + vendor/github.com/vito/go-sse/sse/event.go | 71 + .../vito/go-sse/sse/event_source.go | 268 + .../github.com/vito/go-sse/sse/read_closer.go | 122 + .../x/crypto/chacha20/chacha_ppc64le.s | 110 +- vendor/golang.org/x/crypto/ssh/server.go | 170 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 2 +- .../x/sys/unix/syscall_zos_s390x.go | 8 + .../x/sys/windows/syscall_windows.go | 82 + .../golang.org/x/sys/windows/types_windows.go | 24 + .../x/sys/windows/zsyscall_windows.go | 126 +- vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + vendor/golang.org/x/tools/cover/profile.go | 266 + .../x/tools/go/ast/inspector/inspector.go | 220 + .../x/tools/go/ast/inspector/typeof.go | 227 + .../protobuf/protoadapt/convert.go | 31 + vendor/modules.txt | 73 +- 288 files changed, 87643 insertions(+), 420 deletions(-) create mode 100644 fetcher/bbs_client.go create mode 100644 vendor/code.cloudfoundry.org/bbs/.gitignore create mode 100644 vendor/code.cloudfoundry.org/bbs/CODEOWNERS create mode 100644 vendor/code.cloudfoundry.org/bbs/LICENSE create mode 100644 vendor/code.cloudfoundry.org/bbs/NOTICE create mode 100644 vendor/code.cloudfoundry.org/bbs/README.md create mode 100644 vendor/code.cloudfoundry.org/bbs/client.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/crypt.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/key.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go create mode 100644 vendor/code.cloudfoundry.org/bbs/encryption/package.go create mode 100644 
vendor/code.cloudfoundry.org/bbs/events/event_source.go create mode 100644 vendor/code.cloudfoundry.org/bbs/events/hub.go create mode 100644 vendor/code.cloudfoundry.org/bbs/events/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/encoding.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/envelope.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/format.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/format/versioner.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actions.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actions.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actions.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cell_presence.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cells.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/cells.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/check_definition.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/check_definition.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/domain.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/domain.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/domains.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/environment_variables.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/error.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/error.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/errors.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/evacuation.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go create mode 100644 
vendor/code.cloudfoundry.org/bbs/models/evacuation.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/events.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/events.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/events.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/image_layer.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/image_layer.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/json.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/metric_tags.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/models.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/modification_tag.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/network.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/network.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/ping.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/ping.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/routes.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/security_group.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/security_groups.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/sidecar.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/sidecar.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task_requests.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/task_requests.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/models/validator.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/version.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/volume_mount.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go create mode 100644 vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto create mode 100644 vendor/code.cloudfoundry.org/bbs/package.go create mode 100644 vendor/code.cloudfoundry.org/bbs/routes.go create mode 100644 vendor/code.cloudfoundry.org/bbs/trace/request_id.go create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE create mode 100644 
vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/README.md create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/client.go create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/package.go create mode 100644 vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf create mode 100644 vendor/code.cloudfoundry.org/lager/v3/.gitignore create mode 100644 vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS create mode 100644 vendor/code.cloudfoundry.org/lager/v3/LICENSE create mode 100644 vendor/code.cloudfoundry.org/lager/v3/NOTICE create mode 100644 vendor/code.cloudfoundry.org/lager/v3/README.md create mode 100644 vendor/code.cloudfoundry.org/lager/v3/handler.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/json_redacter.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/logger.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/models.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/slog_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/tools.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go create mode 100644 vendor/code.cloudfoundry.org/lager/v3/writer_sink.go create mode 100644 vendor/github.com/go-logr/logr/context.go create mode 100644 vendor/github.com/go-logr/logr/context_noslog.go create mode 100644 vendor/github.com/go-logr/logr/context_slog.go rename vendor/github.com/go-logr/logr/{slogr => }/sloghandler.go (63%) create mode 100644 vendor/github.com/go-logr/logr/slogr.go rename vendor/github.com/go-logr/logr/{slogr => }/slogsink.go (82%) create mode 100644 vendor/github.com/go-task/slim-sprig/v3/.editorconfig create mode 100644 vendor/github.com/go-task/slim-sprig/v3/.gitattributes create mode 100644 vendor/github.com/go-task/slim-sprig/v3/.gitignore create mode 100644 vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md create mode 100644 vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt create mode 100644 vendor/github.com/go-task/slim-sprig/v3/README.md create mode 100644 vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml create mode 100644 vendor/github.com/go-task/slim-sprig/v3/crypto.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/date.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/defaults.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/dict.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/doc.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/functions.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/list.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/network.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/numeric.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/reflect.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/regex.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/strings.go create mode 100644 vendor/github.com/go-task/slim-sprig/v3/url.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 
100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/google/pprof/AUTHORS create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 vendor/github.com/google/pprof/LICENSE create mode 100644 vendor/github.com/google/pprof/profile/encode.go create mode 100644 vendor/github.com/google/pprof/profile/filter.go create mode 100644 vendor/github.com/google/pprof/profile/index.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 vendor/github.com/google/pprof/profile/merge.go create mode 100644 vendor/github.com/google/pprof/profile/profile.go create mode 100644 vendor/github.com/google/pprof/profile/proto.go create mode 100644 vendor/github.com/google/pprof/profile/prune.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/v2/config/deprecated.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go create mode 100644 
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/config.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/enum_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/errors.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/file_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/flags.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/label_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/report_entry.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/version.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/LICENSE create mode 100644 vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/annotation.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/doc.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/endpoint.go create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/kind.go create mode 100644 
vendor/github.com/openzipkin/zipkin-go/model/span.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/span_id.go
 create mode 100644 vendor/github.com/openzipkin/zipkin-go/model/traceid.go
 create mode 100644 vendor/github.com/vito/go-sse/LICENSE.md
 create mode 100644 vendor/github.com/vito/go-sse/sse/errors.go
 create mode 100644 vendor/github.com/vito/go-sse/sse/event.go
 create mode 100644 vendor/github.com/vito/go-sse/sse/event_source.go
 create mode 100644 vendor/github.com/vito/go-sse/sse/read_closer.go
 create mode 100644 vendor/golang.org/x/tools/LICENSE
 create mode 100644 vendor/golang.org/x/tools/PATENTS
 create mode 100644 vendor/golang.org/x/tools/cover/profile.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/inspector.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go
 create mode 100644 vendor/google.golang.org/protobuf/protoadapt/convert.go

diff --git a/collectors/applications.go b/collectors/applications.go
index 887d8025..b195c68e 100644
--- a/collectors/applications.go
+++ b/collectors/applications.go
@@ -224,6 +224,7 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m
 			process = cProc
 		}
 	}
+
 	spaceRel, ok := application.Relationships[constant.RelationshipTypeSpace]
 	if !ok {
 		return fmt.Errorf("could not find space relation in application '%s'", application.GUID)
@@ -290,6 +291,19 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m
 		string(application.State),
 	).Set(float64(process.Instances.Value))
 
+	runningInstances := appSum.RunningInstances
+	// Use bbs data if available
+	if len(objs.ProcessActualLRPs) > 0 {
+		runningInstances = 0
+		lrps, ok := objs.ProcessActualLRPs[process.GUID]
+		if ok {
+			for _, lrp := range lrps {
+				if lrp.State == "RUNNING" {
+					runningInstances++
+				}
+			}
+		}
+	}
 	c.applicationInstancesRunningMetric.WithLabelValues(
 		application.GUID,
 		application.Name,
@@ -298,7 +312,7 @@ func (c ApplicationsCollector) reportApp(application models.Application, objs *m
 		space.GUID,
 		space.Name,
 		string(application.State),
-	).Set(float64(appSum.RunningInstances))
+	).Set(float64(runningInstances))
 
 	c.applicationMemoryMbMetric.WithLabelValues(
 		application.GUID,
diff --git a/collectors/collectors.go b/collectors/collectors.go
index 1c38ee56..6955ecf0 100644
--- a/collectors/collectors.go
+++ b/collectors/collectors.go
@@ -14,7 +14,8 @@ type ObjectCollector interface {
 
 type Collector struct {
 	workers    int
-	config     *fetcher.CFConfig
+	cfConfig   *fetcher.CFConfig
+	bbsConfig  *fetcher.BBSConfig
 	filter     *filters.Filter
 	collectors []ObjectCollector
 }
@@ -24,12 +25,14 @@ func NewCollector(
 	environment string,
 	deployment string,
 	workers int,
-	config *fetcher.CFConfig,
+	cfConfig *fetcher.CFConfig,
+	bbsConfig *fetcher.BBSConfig,
 	filter *filters.Filter,
 ) (*Collector, error) {
 	res := &Collector{
 		workers:    workers,
-		config:     config,
+		cfConfig:   cfConfig,
+		bbsConfig:  bbsConfig,
 		filter:     filter,
 		collectors: []ObjectCollector{},
 	}
@@ -118,8 +121,9 @@ func NewCollector(
 }
 
 func (c *Collector) Collect(ch chan<- prometheus.Metric) {
-	fetcher := fetcher.NewFetcher(c.workers, c.config, c.filter)
+	fetcher := fetcher.NewFetcher(c.workers, c.cfConfig, c.bbsConfig, c.filter)
 	objs := fetcher.GetObjects()
+
 	for _, collector := range c.collectors {
 		collector.Collect(objs, ch)
 	}
diff --git a/fetcher/bbs_client.go b/fetcher/bbs_client.go
new file mode 100644
index 00000000..bf96b2da
--- /dev/null
+++ b/fetcher/bbs_client.go
@@ -0,0 +1,62 @@
+package fetcher
+
+import (
+	"strings"
+	"time"
+
+
"code.cloudfoundry.org/bbs" + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/bbs/trace" + "code.cloudfoundry.org/lager/v3" +) + +const ( + clientSessionCacheSize int = -1 + maxIdleConnsPerHost int = -1 +) + +type BBSClient struct { + client bbs.Client + config *BBSConfig + logger lager.Logger +} + +type BBSConfig struct { + URL string `yaml:"url"` + Timeout int `yaml:"timeout"` + CAFile string `yaml:"ca_file"` + CertFile string `yaml:"cert_file"` + KeyFile string `yaml:"key_file"` + SkipCertVerify bool `yaml:"skip_cert_verify"` +} + +func NewBBSClient(config *BBSConfig) (*BBSClient, error) { + var err error + bbsClient := BBSClient{ + config: config, + logger: lager.NewLogger("bbs-client"), + } + bbsClientConfig := bbs.ClientConfig{ + URL: config.URL, + Retries: 1, + RequestTimeout: time.Duration(config.Timeout) * time.Second, + } + if strings.HasPrefix(config.URL, "https://") { + bbsClientConfig.IsTLS = true + bbsClientConfig.InsecureSkipVerify = config.SkipCertVerify + bbsClientConfig.CAFile = config.CAFile + bbsClientConfig.CertFile = config.CertFile + bbsClientConfig.KeyFile = config.KeyFile + bbsClientConfig.ClientSessionCacheSize = clientSessionCacheSize + bbsClientConfig.MaxIdleConnsPerHost = maxIdleConnsPerHost + } + bbsClient.client, err = bbs.NewClientWithConfig(bbsClientConfig) + return &bbsClient, err +} + +func (b *BBSClient) GetActualLRPs() ([]*models.ActualLRP, error) { + traceID := trace.GenerateTraceID() + actualLRPs, err := b.client.ActualLRPs(b.logger, traceID, models.ActualLRPFilter{}) + + return actualLRPs, err +} diff --git a/fetcher/fetcher.go b/fetcher/fetcher.go index 6f838744..d1a86507 100644 --- a/fetcher/fetcher.go +++ b/fetcher/fetcher.go @@ -36,14 +36,16 @@ type CFConfig struct { type Fetcher struct { sync.Mutex - config *CFConfig - worker *Worker + cfConfig *CFConfig + bbsConfig *BBSConfig + worker *Worker } -func NewFetcher(threads int, config *CFConfig, filter *filters.Filter) *Fetcher { +func NewFetcher(threads int, config *CFConfig, bbsConfig *BBSConfig, filter *filters.Filter) *Fetcher { return &Fetcher{ - config: config, - worker: NewWorker(threads, filter), + cfConfig: config, + bbsConfig: bbsConfig, + worker: NewWorker(threads, filter), } } @@ -82,20 +84,27 @@ func (c *Fetcher) workInit() { c.worker.PushIf("service_route_bindings", c.fetchServiceRouteBindings, filters.ServiceRouteBindings) c.worker.PushIf("users", c.fetchUsers, filters.Events) c.worker.PushIf("events", c.fetchEvents, filters.Events) + c.worker.PushIf("actual_lrps", c.fetchActualLRPs) } func (c *Fetcher) fetch() *models.CFObjects { result := models.NewCFObjects() - session, err := NewSessionExt(c.config) + session, err := NewSessionExt(c.cfConfig) if err != nil { log.WithError(err).Error("unable to initialize cloud foundry clients") result.Error = err return result } + bbs, err := NewBBSClient(c.bbsConfig) + if err != nil { + log.WithError(err).Error("unable to initialize bbs client") + result.Error = err + return result + } c.workInit() - result.Error = c.worker.Do(session, result) + result.Error = c.worker.Do(session, bbs, result) return result } diff --git a/fetcher/fetcher_handlers.go b/fetcher/fetcher_handlers.go index 87f63657..c5bdad2d 100644 --- a/fetcher/fetcher_handlers.go +++ b/fetcher/fetcher_handlers.go @@ -2,8 +2,11 @@ package fetcher import ( "fmt" + "regexp" "time" + models2 "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3" "code.cloudfoundry.org/cli/resources" "github.com/cloudfoundry/cf_exporter/filters" @@ -17,13 
+20,40 @@ func loadIndex[T any](store map[string]T, objects []T, key func(T) string) { } } -func (c *Fetcher) fetchInfo(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchActualLRPs(_ *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { + if bbs == nil { + return nil + } + log.Infof("fetching resources from BBS API") + actualLRPs, err := bbs.GetActualLRPs() + if err == nil { + // match first guid as lrps process_guid field contains process_guid and instance_guid "<:process_guid>-<:instance_guid>" + re := regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}") + for idx := 0; idx < len(actualLRPs); idx++ { + processGUID := actualLRPs[idx].ProcessGuid + match := re.FindString(processGUID) + if match != "" { + processGUID = match + } + _, ok := entry.ProcessActualLRPs[processGUID] + if !ok { + entry.ProcessActualLRPs[processGUID] = []*models2.ActualLRP{} + } + entry.ProcessActualLRPs[processGUID] = append(entry.ProcessActualLRPs[processGUID], actualLRPs[idx]) + } + } else { + log.Errorf("could not fetch actual lrps: %s", err) + } + return err +} + +func (c *Fetcher) fetchInfo(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { var err error entry.Info, err = session.GetInfo() return err } -func (c *Fetcher) fetchOrgs(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchOrgs(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { orgs, _, err := session.V3().GetOrganizations(LargeQuery) if err == nil { loadIndex(entry.Orgs, orgs, func(r resources.Organization) string { return r.GUID }) @@ -31,7 +61,7 @@ func (c *Fetcher) fetchOrgs(session *SessionExt, entry *models.CFObjects) error return err } -func (c *Fetcher) fetchOrgQuotas(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchOrgQuotas(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { quotas, err := session.GetOrganizationQuotas() if err == nil { loadIndex(entry.OrgQuotas, quotas, func(r models.Quota) string { return r.GUID }) @@ -42,7 +72,7 @@ func (c *Fetcher) fetchOrgQuotas(session *SessionExt, entry *models.CFObjects) e // fetchSpaces // 1. silent fail because space may have been deleted between listing and // summary fetching attempt. 
See cloudfoundry/cf_exporter#85 -func (c *Fetcher) fetchSpaces(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchSpaces(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { spaces, _, _, err := session.V3().GetSpaces(LargeQuery) if err != nil { return err @@ -53,7 +83,7 @@ func (c *Fetcher) fetchSpaces(session *SessionExt, entry *models.CFObjects) erro for idx := 0; idx < total; idx++ { space := spaces[idx] name := fmt.Sprintf("space_summaries %04d/%04d (%s)", idx, total, space.GUID) - c.worker.PushIf(name, func(session *SessionExt, entry *models.CFObjects) error { + c.worker.PushIf(name, func(session *SessionExt, bbs *BBSClient, entry *models.CFObjects) error { spaceSum, err := session.GetSpaceSummary(space.GUID) if err == nil { c.Lock() @@ -73,7 +103,7 @@ func (c *Fetcher) fetchSpaces(session *SessionExt, entry *models.CFObjects) erro return nil } -func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { quotas, err := session.GetSpaceQuotas() if err == nil { loadIndex(entry.SpaceQuotas, quotas, func(r models.Quota) string { return r.GUID }) @@ -81,7 +111,7 @@ func (c *Fetcher) fetchSpaceQuotas(session *SessionExt, entry *models.CFObjects) return err } -func (c *Fetcher) fetchApplications(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchApplications(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { apps, err := session.GetApplications() if err == nil { loadIndex(entry.Apps, apps, func(r models.Application) string { return r.GUID }) @@ -89,7 +119,7 @@ func (c *Fetcher) fetchApplications(session *SessionExt, entry *models.CFObjects return err } -func (c *Fetcher) fetchDomains(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchDomains(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { domains, _, err := session.V3().GetDomains(LargeQuery) if err == nil { loadIndex(entry.Domains, domains, func(r resources.Domain) string { return r.GUID }) @@ -97,7 +127,7 @@ func (c *Fetcher) fetchDomains(session *SessionExt, entry *models.CFObjects) err return err } -func (c *Fetcher) fetchProcesses(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchProcesses(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { processes, _, err := session.V3().GetProcesses(LargeQuery) if err != nil { return err @@ -115,7 +145,7 @@ func (c *Fetcher) fetchProcesses(session *SessionExt, entry *models.CFObjects) e return nil } -func (c *Fetcher) fetchRoutes(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchRoutes(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { routes, _, err := session.V3().GetRoutes(LargeQuery) if err == nil { loadIndex(entry.Routes, routes, func(r resources.Route) string { return r.GUID }) @@ -123,7 +153,7 @@ func (c *Fetcher) fetchRoutes(session *SessionExt, entry *models.CFObjects) erro return err } -func (c *Fetcher) fetchRouteServices(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchRouteServices(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { routes, _, _, err := session.V3().GetRouteBindings(LargeQuery) if err == nil { loadIndex(entry.RoutesBindings, routes, func(r resources.RouteBinding) string { return r.RouteGUID }) @@ -131,7 +161,7 @@ func (c *Fetcher) fetchRouteServices(session *SessionExt, entry 
*models.CFObject return err } -func (c *Fetcher) fetchSecurityGroups(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchSecurityGroups(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { securitygroups, _, err := session.V3().GetSecurityGroups(LargeQuery) if err == nil { loadIndex(entry.SecurityGroups, securitygroups, func(r resources.SecurityGroup) string { return r.GUID }) @@ -139,7 +169,7 @@ func (c *Fetcher) fetchSecurityGroups(session *SessionExt, entry *models.CFObjec return err } -func (c *Fetcher) fetchStacks(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchStacks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { stacks, _, err := session.V3().GetStacks(LargeQuery) if err == nil { loadIndex(entry.Stacks, stacks, func(r resources.Stack) string { return r.GUID }) @@ -147,7 +177,7 @@ func (c *Fetcher) fetchStacks(session *SessionExt, entry *models.CFObjects) erro return err } -func (c *Fetcher) fetchBuildpacks(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchBuildpacks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { buildpacks, _, err := session.V3().GetBuildpacks(LargeQuery) if err == nil { loadIndex(entry.Buildpacks, buildpacks, func(r resources.Buildpack) string { return r.GUID }) @@ -155,7 +185,7 @@ func (c *Fetcher) fetchBuildpacks(session *SessionExt, entry *models.CFObjects) return err } -func (c *Fetcher) fetchTasks(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchTasks(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { tasks, err := session.GetTasks() if err == nil { loadIndex(entry.Tasks, tasks, func(r models.Task) string { return r.GUID }) @@ -163,7 +193,7 @@ func (c *Fetcher) fetchTasks(session *SessionExt, entry *models.CFObjects) error return err } -func (c *Fetcher) fetchServiceBrokers(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceBrokers(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { servicebrokers, _, err := session.V3().GetServiceBrokers(LargeQuery) if err == nil { loadIndex(entry.ServiceBrokers, servicebrokers, func(r resources.ServiceBroker) string { return r.GUID }) @@ -171,7 +201,7 @@ func (c *Fetcher) fetchServiceBrokers(session *SessionExt, entry *models.CFObjec return err } -func (c *Fetcher) fetchServiceOfferings(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceOfferings(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { serviceofferings, _, err := session.V3().GetServiceOfferings(LargeQuery) if err == nil { loadIndex(entry.ServiceOfferings, serviceofferings, func(r resources.ServiceOffering) string { return r.GUID }) @@ -179,7 +209,7 @@ func (c *Fetcher) fetchServiceOfferings(session *SessionExt, entry *models.CFObj return err } -func (c *Fetcher) fetchServiceInstances(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceInstances(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { serviceinstances, _, _, err := session.V3().GetServiceInstances(LargeQuery) if err == nil { loadIndex(entry.ServiceInstances, serviceinstances, func(r resources.ServiceInstance) string { return r.GUID }) @@ -187,7 +217,7 @@ func (c *Fetcher) fetchServiceInstances(session *SessionExt, entry *models.CFObj return err } -func (c *Fetcher) fetchServicePlans(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) 
fetchServicePlans(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { plans, _, err := session.V3().GetServicePlans() if err == nil { loadIndex(entry.ServicePlans, plans, func(r resources.ServicePlan) string { return r.GUID }) @@ -195,7 +225,7 @@ func (c *Fetcher) fetchServicePlans(session *SessionExt, entry *models.CFObjects return err } -func (c *Fetcher) fetchServiceBindings(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceBindings(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { bindings, _, err := session.V3().GetServiceCredentialBindings(LargeQuery) if err == nil { loadIndex(entry.ServiceBindings, bindings, func(r resources.ServiceCredentialBinding) string { return r.GUID }) @@ -203,7 +233,7 @@ func (c *Fetcher) fetchServiceBindings(session *SessionExt, entry *models.CFObje return err } -func (c *Fetcher) fetchServiceRouteBindings(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchServiceRouteBindings(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { routeBindings, _, _, err := session.V3().GetRouteBindings(LargeQuery) if err == nil { loadIndex(entry.ServiceRouteBindings, routeBindings, func(r resources.RouteBinding) string { return r.GUID }) @@ -211,7 +241,7 @@ func (c *Fetcher) fetchServiceRouteBindings(session *SessionExt, entry *models.C return err } -func (c *Fetcher) fetchIsolationSegments(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchIsolationSegments(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { segments, _, err := session.V3().GetIsolationSegments() if err == nil { loadIndex(entry.Segments, segments, func(r resources.IsolationSegment) string { return r.GUID }) @@ -219,7 +249,7 @@ func (c *Fetcher) fetchIsolationSegments(session *SessionExt, entry *models.CFOb return err } -func (c *Fetcher) fetchUsers(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchUsers(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { users, _, err := session.V3().GetUsers(LargeQuery) if err == nil { loadIndex(entry.Users, users, func(r resources.User) string { return r.GUID }) @@ -230,7 +260,7 @@ func (c *Fetcher) fetchUsers(session *SessionExt, entry *models.CFObjects) error // fetchEvents - // 1. create query param "created_ats[gt]=(now - 15min)". There is no point scrapping more // data since the event metric will filter out events older than last scrap. -func (c *Fetcher) fetchEvents(session *SessionExt, entry *models.CFObjects) error { +func (c *Fetcher) fetchEvents(session *SessionExt, _ *BBSClient, entry *models.CFObjects) error { // 1. location, _ := time.LoadLocation("UTC") since := time.Now().Add(-1 * 15 * time.Minute) diff --git a/fetcher/fetcher_test.go b/fetcher/fetcher_test.go index 7d2a8033..844b9003 100644 --- a/fetcher/fetcher_test.go +++ b/fetcher/fetcher_test.go @@ -19,7 +19,7 @@ var _ = ginkgo.Describe("Fetcher", func() { ginkgo.JustBeforeEach(func() { f, err := filters.NewFilter(active...) 
gomega.Ω(err).ShouldNot(gomega.HaveOccurred()) - fetcher = NewFetcher(10, &CFConfig{}, f) + fetcher = NewFetcher(10, &CFConfig{}, &BBSConfig{}, f) gomega.Ω(fetcher).ShouldNot(gomega.BeNil()) fetcher.workInit() diff --git a/fetcher/worker.go b/fetcher/worker.go index ce9487e7..36a982df 100644 --- a/fetcher/worker.go +++ b/fetcher/worker.go @@ -9,7 +9,7 @@ import ( log "github.com/sirupsen/logrus" ) -type WorkHandler func(*SessionExt, *models.CFObjects) error +type WorkHandler func(*SessionExt, *BBSClient, *models.CFObjects) error type Work struct { name string @@ -52,9 +52,9 @@ func (c *Worker) Reset() { c.errs = make(chan error, 1000) } -func (c *Worker) Do(session *SessionExt, result *models.CFObjects) error { +func (c *Worker) Do(session *SessionExt, bbs *BBSClient, result *models.CFObjects) error { for i := 0; i < c.threads; i++ { - go c.run(i, session, result) + go c.run(i, session, bbs, result) } return c.Wait() } @@ -72,7 +72,7 @@ func (c *Worker) Wait() error { return nil } -func (c *Worker) run(id int, session *SessionExt, entry *models.CFObjects) { +func (c *Worker) run(id int, session *SessionExt, bbs *BBSClient, entry *models.CFObjects) { for { work, ok := <-c.list if !ok { @@ -80,7 +80,7 @@ func (c *Worker) run(id int, session *SessionExt, entry *models.CFObjects) { } log.Debugf("[%2d] %s", id, work.name) start := time.Now() - err := work.handler(session, entry) + err := work.handler(session, bbs, entry) duration := time.Since(start) if err != nil { log.Errorf("[%2d] %s error: %s", id, work.name, err) diff --git a/filters/filters.go b/filters/filters.go index b190e8cf..b4d3c247 100644 --- a/filters/filters.go +++ b/filters/filters.go @@ -22,6 +22,7 @@ const ( Spaces = "spaces" Stacks = "stacks" Tasks = "tasks" + InstancesRunning = "instances_running" ) var ( @@ -68,6 +69,7 @@ func NewFilter(active ...string) (*Filter, error) { Stacks: true, Tasks: false, Events: false, + InstancesRunning: false, }, } @@ -99,6 +101,7 @@ func (f *Filter) setActive(active []string) error { Stacks: false, Tasks: false, Events: false, + InstancesRunning: false, } // enable only given filters @@ -115,7 +118,7 @@ func (f *Filter) setActive(active []string) error { func (f *Filter) Enabled(name string) bool { status, ok := f.activated[name] - return (ok && status) + return ok && status } func (f *Filter) Any(names ...string) bool { diff --git a/go.mod b/go.mod index 557e9541..5050ee31 100644 --- a/go.mod +++ b/go.mod @@ -5,10 +5,12 @@ go 1.21 toolchain go1.21.1 require ( + code.cloudfoundry.org/bbs v0.0.0-20240521125508-20d3971ce31b code.cloudfoundry.org/cli v0.0.0-20240122193559-1d05b71bb887 + code.cloudfoundry.org/lager/v3 v3.0.3 github.com/cloudfoundry-community/go-cf-clients-helper/v2 v2.5.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.31.0 + github.com/onsi/gomega v1.33.0 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/common v0.45.0 github.com/sirupsen/logrus v1.9.3 @@ -17,11 +19,13 @@ require ( require ( code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4 // indirect + code.cloudfoundry.org/cfhttp/v2 v2.1.0 // indirect code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 // indirect code.cloudfoundry.org/clock v1.1.0 // indirect code.cloudfoundry.org/go-log-cache/v2 v2.0.6 // indirect code.cloudfoundry.org/go-loggregator/v9 v9.1.0 // indirect code.cloudfoundry.org/jsonry v1.1.4 // indirect + code.cloudfoundry.org/locket v0.0.0-20240521151413-b344fdd15d03 // indirect code.cloudfoundry.org/tlsconfig 
v0.0.0-20230612153104-23c0622de227 // indirect code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -40,13 +44,18 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-test/deep v1.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect github.com/imdario/mergo v0.3.15 // indirect + github.com/jackc/pgx/v5 v5.6.0 // indirect github.com/jessevdk/go-flags v1.5.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/lunixbochs/vtclean v1.0.0 // indirect @@ -58,6 +67,8 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo/v2 v2.17.3 // indirect + github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -65,13 +76,15 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/tedsuo/rata v1.0.1-0.20170830210128-07d200713958 // indirect github.com/vito/go-interact v1.0.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.23.0 // indirect + github.com/vito/go-sse v1.0.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/oauth2 v0.13.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.20.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect diff --git a/go.sum b/go.sum index d9572b7f..a041ff4a 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,9 @@ +code.cloudfoundry.org/bbs v0.0.0-20240521125508-20d3971ce31b h1:bW4BnYKV5EdCwaTYhIvyHVLWFvjBG9fQ8h8IYAQv7lo= +code.cloudfoundry.org/bbs v0.0.0-20240521125508-20d3971ce31b/go.mod h1:XKlGVVXFi5EcHHMPzw3xgONK9PeEZuUbIC43XNwxD10= code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4 h1:9G5F8zgma5v0GdDvNz6iZwwJp3RS/z0SY/aHGfVwvTo= code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4/go.mod h1:wYHCXH/gI19ujoFVuMkY48qPpPCoHLKBKXPkn67h/Yc= +code.cloudfoundry.org/cfhttp/v2 v2.1.0 h1:HbQ5H2R+HEKG/rcB6Gk3okeC3h2fAC4PPnLQoMHvzZM= +code.cloudfoundry.org/cfhttp/v2 v2.1.0/go.mod h1:k9R36Y/9dUc9OsX4dfDuEjHZ7Q00ttklKQj6HD6h6+U= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 h1:Yc9r1p21kEpni9WlG4mwOZw87TB2QlyS9sAEebZ3+ak= code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6/go.mod h1:u5FovqC5GGAEbFPz+IdjycDA+gIjhUwqxnu0vbHwVeM= 
code.cloudfoundry.org/cli v0.0.0-20240122193559-1d05b71bb887 h1:n20nNi5SnRBetn6tmQVA1UqTag1KUjpLxgpqMyCjVQA= @@ -20,12 +24,16 @@ code.cloudfoundry.org/jsonry v1.1.4 h1:P9N7IlH1/4aRCLcXLgLFj1hkcBmV7muijJzY+K6U4 code.cloudfoundry.org/jsonry v1.1.4/go.mod h1:6aKilShQP7w/Ez76h1El2/n9y2OkHuU56nKSBB9Gp0A= code.cloudfoundry.org/lager v2.0.0+incompatible h1:WZwDKDB2PLd/oL+USK4b4aEjUymIej9My2nUQ9oWEwQ= code.cloudfoundry.org/lager v2.0.0+incompatible/go.mod h1:O2sS7gKP3HM2iemG+EnwvyNQK7pTSC6Foi4QiMp9sSk= +code.cloudfoundry.org/lager/v3 v3.0.3 h1:/UTmadZfIaKuT/whEinSxK1mzRfNu1uPfvjFfGqiwzM= +code.cloudfoundry.org/lager/v3 v3.0.3/go.mod h1:Zn5q1SrIuuHjEUE7xerMKt3ztunrJQCZETAo7rV0CH8= +code.cloudfoundry.org/locket v0.0.0-20240521151413-b344fdd15d03 h1:jrunp9b2C7KMCIYlgU8CGxRa0u4A1EtJiYHaZRSo5GQ= +code.cloudfoundry.org/locket v0.0.0-20240521151413-b344fdd15d03/go.mod h1:AwHLRkdXtttLXNB8RHgLfErJ2kKafH62AR2OClhy6xI= code.cloudfoundry.org/tlsconfig v0.0.0-20230612153104-23c0622de227 h1:QYyb6Ur0Ys6FciDB3+8zCW3eVk7AxAs2++Foa5DAdt0= code.cloudfoundry.org/tlsconfig v0.0.0-20230612153104-23c0622de227/go.mod h1:C8SxvGRSutmgzV2FxH8Zwqz2Q8HsaAITQRQFKhlDzPw= code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d h1:M+zXqtXJqcsmpL76aU0tdl1ho23eYa4axYoM4gD62UA= code.cloudfoundry.org/ykk v0.0.0-20170424192843-e4df4ce2fd4d/go.mod h1:YUJiVOr5xl0N/RjMxM1tHmgSpBbi5UM+KoVR5AoejO0= -filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= -filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -73,17 +81,22 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod 
h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -113,8 +126,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb h1:LCMfzVg3sflxTs4UvuP4D8CkoZnfHLe2qzqgDn/4OHs= -github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= @@ -123,6 +136,14 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -137,8 +158,9 @@ github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3x github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -167,21 +189,25 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= -github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= +github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= +github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= -github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= +github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= +github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= +github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -214,10 +240,14 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4 h1:MGZzzxBuPuK4J0XQo+0uy0NnXQGKzHXhYp5oG1Wy860= +github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tedsuo/rata v1.0.1-0.20170830210128-07d200713958 h1:mueRRuRjR35dEOkHdhpoRcruNgBz0ohG659HxxmcAwA= github.com/tedsuo/rata v1.0.1-0.20170830210128-07d200713958/go.mod h1:X47ELzhOoLbfFIY0Cql9P6yo3Cdwf2CMX3FVZxRzJPc= github.com/vito/go-interact v1.0.0 h1:niLW3NjGoMWOayoR6iQ8AxWVM1Q4rR8VGZ1mt6uK3BM= github.com/vito/go-interact v1.0.0/go.mod h1:W1mz+UVUZScRM3eUjQhEQiLDnQ+yLnXkB2rjBfGPrXg= +github.com/vito/go-sse v1.0.0 h1:e6/iTrrvy8BRrOwJwmQmlndlil+TLdxXvHi55ZDzH6M= +github.com/vito/go-sse v1.0.0/go.mod h1:2wkcaQ+jtlZ94Uve8gYZjFpL68luAjssTINA2hpgcZs= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -229,8 +259,8 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -248,8 +278,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -258,6 +288,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -283,12 +315,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -306,8 +338,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -346,6 +378,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/main.go b/main.go index ee9ca358..6b3d6e39 100644 --- a/main.go +++ b/main.go @@ -17,6 +17,30 @@ import ( ) var ( + bbsAPIUrl = kingpin.Flag( + "bbs.api_url", "BBS API URL ($CF_EXPORTER_BBS_API_URL)", + ).Envar("CF_EXPORTER_BBS_API_URL").String() + + bbsTimeout = kingpin.Flag( + "bbs.timeout", "BBS API Timeout ($CF_EXPORTER_BBS_TIMEOUT)", + ).Envar("CF_EXPORTER_BBS_TIMEOUT").Default("10").Int() + + bbsCAFile = kingpin.Flag( + "bbs.ca_file", "BBS CA File ($CF_EXPORTER_BBS_CA_FILE)", + ).Envar("CF_EXPORTER_BBS_CA_FILE").Default("").String() + + bbsCertFile = kingpin.Flag( + "bbs.cert_file", "BBS Cert File ($CF_EXPORTER_BBS_CERT_FILE)", + ).Envar("CF_EXPORTER_BBS_CERT_FILE").Default("").String() + + bbsKeyFile = kingpin.Flag( + "bbs.key_file", "BBS Key File ($CF_EXPORTER_BBS_KEY_FILE)", + ).Envar("CF_EXPORTER_BBS_KEY_FILE").String() + + bbsSkipSSLValidation = kingpin.Flag( + "bbs.skip_ssl_verify", "Disable SSL Verify for BBS ($CF_EXPORTER_BBS_SKIP_SSL_VERIFY)", + ).Envar("CF_EXPORTER_BBS_SKIP_SSL_VERIFY").Default("false").Bool() + cfAPIUrl = kingpin.Flag( "cf.api_url", "Cloud Foundry API URL ($CF_EXPORTER_CF_API_URL)", ).Envar("CF_EXPORTER_CF_API_URL").String() @@ -42,7 +66,7 @@ var ( ).Envar("CF_EXPORTER_CF_DEPLOYMENT_NAME").Required().String() filterCollectors = kingpin.Flag( - "filter.collectors", "Comma separated collectors to filter (Applications,Buildpacks,Events,IsolationSegments,Organizations,Routes,SecurityGroups,ServiceBindings,ServiceInstances,ServicePlans,Services,Spaces,Stacks,Tasks). If not set, all collectors except Events and Tasks are enabled ($CF_EXPORTER_FILTER_COLLECTORS)", + "filter.collectors", "Comma separated collectors to filter (Applications,Buildpacks,Events,IsolationSegments,Organizations,Routes,SecurityGroups,ServiceBindings,ServiceInstances,ServicePlans,Services,Spaces,Stacks,Tasks,ActualLRPs). 
If not set, all collectors except Events and Tasks are enabled ($CF_EXPORTER_FILTER_COLLECTORS)", ).Envar("CF_EXPORTER_FILTER_COLLECTORS").Default("").String() metricsNamespace = kingpin.Flag( @@ -152,7 +176,7 @@ func main() { } log.SetLevel(lvl) - config := &fetcher.CFConfig{ + cfConfig := &fetcher.CFConfig{ URL: *cfAPIUrl, Username: *cfUsername, Password: *cfPassword, @@ -161,6 +185,18 @@ func main() { SkipSSLValidation: *skipSSLValidation, } + bbsConfig := &fetcher.BBSConfig{ + URL: *bbsAPIUrl, + Timeout: *bbsTimeout, + CAFile: *bbsCAFile, + CertFile: *bbsCertFile, + KeyFile: *bbsKeyFile, + SkipCertVerify: *bbsSkipSSLValidation, + } + + log.Infof("cfConfig: %+v", cfConfig) + log.Infof("bbsConfig: %+v", bbsConfig) + active := []string{} if len(*filterCollectors) != 0 { active = strings.Split(*filterCollectors, ",") @@ -171,7 +207,7 @@ func main() { os.Exit(1) } - c, err := collectors.NewCollector(*metricsNamespace, *metricsEnvironment, *cfDeploymentName, *workers, config, filter) + c, err := collectors.NewCollector(*metricsNamespace, *metricsEnvironment, *cfDeploymentName, *workers, cfConfig, bbsConfig, filter) if err != nil { log.Error(err) os.Exit(1) diff --git a/models/model.go b/models/model.go index ffda133a..c7093fbe 100644 --- a/models/model.go +++ b/models/model.go @@ -3,6 +3,8 @@ package models import ( "time" + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/cli/api/cloudcontroller/ccv3/constant" "code.cloudfoundry.org/cli/resources" "code.cloudfoundry.org/cli/types" @@ -32,6 +34,7 @@ type CFObjects struct { SpaceSummaries map[string]SpaceSummary `json:"space_summaries"` AppSummaries map[string]AppSummary `json:"app_summaries"` AppProcesses map[string][]resources.Process `json:"app_processes"` + ProcessActualLRPs map[string][]*models.ActualLRP `json:"process_actual_lrps"` Events map[string]Event `json:"events"` Users map[string]resources.User `json:"users"` ServiceRouteBindings map[string]resources.RouteBinding `json:"service_route_bindings"` @@ -173,6 +176,7 @@ func NewCFObjects() *CFObjects { SpaceSummaries: map[string]SpaceSummary{}, AppSummaries: map[string]AppSummary{}, AppProcesses: map[string][]resources.Process{}, + ProcessActualLRPs: map[string][]*models.ActualLRP{}, Users: map[string]resources.User{}, Events: map[string]Event{}, ServiceRouteBindings: map[string]resources.RouteBinding{}, diff --git a/vendor/code.cloudfoundry.org/bbs/.gitignore b/vendor/code.cloudfoundry.org/bbs/.gitignore new file mode 100644 index 00000000..008a3fc6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/.gitignore @@ -0,0 +1,3 @@ +*.coverprofile +*.test +cmd/bbs/bbs diff --git a/vendor/code.cloudfoundry.org/bbs/CODEOWNERS b/vendor/code.cloudfoundry.org/bbs/CODEOWNERS new file mode 100644 index 00000000..6a633c7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/CODEOWNERS @@ -0,0 +1 @@ +* @cloudfoundry/wg-app-runtime-platform-diego-approvers diff --git a/vendor/code.cloudfoundry.org/bbs/LICENSE b/vendor/code.cloudfoundry.org/bbs/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/bbs/NOTICE b/vendor/code.cloudfoundry.org/bbs/NOTICE new file mode 100644 index 00000000..5f623629 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/NOTICE @@ -0,0 +1,18 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/bbs/README.md b/vendor/code.cloudfoundry.org/bbs/README.md new file mode 100644 index 00000000..734061c0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/README.md @@ -0,0 +1,83 @@ +# BBS Server [![GoDoc](https://godoc.org/github.com/cloudfoundry/bbs?status.svg)](https://godoc.org/github.com/cloudfoundry/bbs) + +**Note**: This repository should be imported as `code.cloudfoundry.org/bbs`. + +API to access the database for Diego. + +A general overview of the BBS is documented [here](doc). + +## Reporting issues and requesting features + +Please report all issues and feature requests in [cloudfoundry/diego-release](https://github.com/cloudfoundry/diego-release/issues). + +## API + +To interact with the BBS from outside of Diego, use the methods provided on the +[`Client` interface](https://godoc.org/github.com/cloudfoundry/bbs#Client). + +Components within Diego may use the full [`InternalClient` +interface](https://godoc.org/github.com/cloudfoundry/bbs#InternalClient) to modify internal state. + +## Code Generation + +The protobuf models in this repository require version 3.5 or later of the `protoc` compiler. + +### OSX + +On Mac OS X with [Homebrew](http://brew.sh/), run the following to install it: + +``` +brew install protobuf +``` + +### Linux + +1. Download a zip archive of the latest protobuf release from [here](https://github.com/google/protobuf/releases). +1. Unzip the archive in `/usr/local` (including /bin and /include folders). +1. `chmod a+x /usr/local/bin/protoc` to make sure you can use the binary. + +> If you already have an older version of protobuf installed, you must +> uninstall it first by running `brew uninstall protobuf` + +Install the `gogoproto` compiler by running: + +``` +go install github.com/gogo/protobuf/protoc-gen-gogoslick +``` + +Run `go generate ./...` from the root directory of this repository to generate code from the `.proto` files as well as to generate fake implementations of certain interfaces for use in test code. + +### Generating ruby models for BBS models + +The following documentation assume the following versions: + +1. [protoc](https://github.com/google/protobuf/releases) `> v3.5.0` +2. [ruby protobuf gem](https://github.com/ruby-protobuf/protobuf) `> 3.6.12` + +Run the following commands from the `models` directory to generate `.pb.rb` +files for the BBS models: + +1. 
`sed -i'' -e 's/package models/package diego.bbs.models/' ./*.proto` +1. `protoc -I../../vendor --proto_path=. --ruby_out=/path/to/ruby/files *.proto` + +**Note** Replace `/path/to/ruby/files` with the desired destination of the +`.pb.rb` files. That directory must exist before running this command. + +**Note** The above steps assume that +`github.com/gogo/protobuf/gogoproto/gogo.proto` is on the `GOPATH`. + +## SQL + +See the instructions in [Running the SQL Unit Tests](https://github.com/cloudfoundry/diego-release/blob/develop/CONTRIBUTING.md#running-the-sql-unit-tests) +for testing against a SQL backend + +See [Migrations](https://github.com/cloudfoundry/bbs/blob/master/doc/bbs-migration.md) for information about writing database migrations. + +## Run Tests + +1. First setup your [GOPATH and install the necessary dependencies](https://github.com/cloudfoundry/diego-release/blob/develop/CONTRIBUTING.md#initial-setup) for running tests. +1. Setup a MySQL server or a postgres server. [Please follow these instructions.](https://github.com/cloudfoundry/diego-release/blob/develop/CONTRIBUTING.md#running-the-sql-unit-tests) +1. Run the tests from the root directory of the bbs repo: +``` +SQL_FLAVOR=mysql ginkgo -r -p -race +``` diff --git a/vendor/code.cloudfoundry.org/bbs/client.go b/vendor/code.cloudfoundry.org/bbs/client.go new file mode 100644 index 00000000..c210bdd5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/client.go @@ -0,0 +1,1019 @@ +package bbs + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "mime" + "net" + "net/http" + "net/url" + "time" + + "code.cloudfoundry.org/bbs/events" + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/bbs/trace" + cfhttp "code.cloudfoundry.org/cfhttp/v2" + "code.cloudfoundry.org/lager/v3" + "code.cloudfoundry.org/tlsconfig" + "github.com/gogo/protobuf/proto" + "github.com/tedsuo/rata" + "github.com/vito/go-sse/sse" +) + +const ( + ContentTypeHeader = "Content-Type" + XCfRouterErrorHeader = "X-Cf-Routererror" + ProtoContentType = "application/x-protobuf" + KeepContainer = true + DeleteContainer = false + DefaultRetryCount = 3 + + InvalidResponseMessage = "Invalid Response with status code: %d" +) + +var EndpointNotFoundErr = models.NewError(models.Error_InvalidResponse, fmt.Sprintf(InvalidResponseMessage, 404)) + +//go:generate counterfeiter -generate + +//counterfeiter:generate -o fake_bbs/fake_internal_client.go . InternalClient +//counterfeiter:generate -o fake_bbs/fake_client.go . Client + +/* +The InternalClient interface exposes all available endpoints of the BBS server, +including private endpoints which should be used exclusively by internal Diego +components. To interact with the BBS from outside of Diego, the Client +should be used instead. 
+*/ +type InternalClient interface { + Client + + ClaimActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error + StartActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, netInfo *models.ActualLRPNetInfo, internalRoutes []*models.ActualLRPInternalRoute, metricTags map[string]string, routable bool, availabilityZone string) error + CrashActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) error + FailActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, errorMessage string) error + RemoveActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error + + EvacuateClaimedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) + EvacuateRunningActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, netInfo *models.ActualLRPNetInfo, internalRoutes []*models.ActualLRPInternalRoute, metricTags map[string]string, routable bool, availabilityZone string) (bool, error) + EvacuateStoppedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) + EvacuateCrashedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (bool, error) + RemoveEvacuatingActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error + + StartTask(logger lager.Logger, traceID string, taskGuid string, cellID string) (bool, error) + FailTask(logger lager.Logger, traceID string, taskGuid, failureReason string) error + RejectTask(logger lager.Logger, traceID string, taskGuid, failureReason string) error + CompleteTask(logger lager.Logger, traceID string, taskGuid, cellId string, failed bool, failureReason, result string) error +} + +/* +The External InternalClient can be used to access the BBS's public functionality. +It exposes methods for basic LRP and Task Lifecycles, Domain manipulation, and +event subscription. +*/ +type Client interface { + ExternalTaskClient + ExternalDomainClient + ExternalActualLRPClient + ExternalDesiredLRPClient + ExternalEventClient + + // Returns true if the BBS server is reachable + Ping(logger lager.Logger, traceID string) bool + + // Lists all Cells + Cells(logger lager.Logger, traceID string) ([]*models.CellPresence, error) +} + +/* +The ExternalTaskClient is used to access Diego's ability to run one-off tasks. 
+More information about this API can be found in the bbs docs: + +https://code.cloudfoundry.org/bbs/tree/master/doc/tasks.md +*/ +type ExternalTaskClient interface { + // Creates a Task from the given TaskDefinition + DesireTask(logger lager.Logger, traceID string, guid string, domain string, def *models.TaskDefinition) error + + // Lists all Tasks + Tasks(logger lager.Logger, traceID string) ([]*models.Task, error) + + // List all Tasks that match filter + TasksWithFilter(logger lager.Logger, traceID string, filter models.TaskFilter) ([]*models.Task, error) + + // Lists all Tasks of the given domain + TasksByDomain(logger lager.Logger, traceID string, domain string) ([]*models.Task, error) + + // Lists all Tasks on the given cell + TasksByCellID(logger lager.Logger, traceID string, cellId string) ([]*models.Task, error) + + // Returns the Task with the given guid + TaskByGuid(logger lager.Logger, traceID string, guid string) (*models.Task, error) + + // Cancels the Task with the given task guid + CancelTask(logger lager.Logger, traceID string, taskGuid string) error + + // Resolves a Task with the given guid + ResolvingTask(logger lager.Logger, traceID string, taskGuid string) error + + // Deletes a completed task with the given guid + DeleteTask(logger lager.Logger, traceID string, taskGuid string) error +} + +/* +The ExternalDomainClient is used to access and update Diego's domains. +*/ +type ExternalDomainClient interface { + // Lists the active domains + Domains(logger lager.Logger, traceID string) ([]string, error) + + // Creates a domain or bumps the ttl on an existing domain + UpsertDomain(logger lager.Logger, traceID string, domain string, ttl time.Duration) error +} + +/* +The ExternalActualLRPClient is used to access and retire Actual LRPs +*/ +type ExternalActualLRPClient interface { + // Returns all ActualLRPs matching the given ActualLRPFilter + ActualLRPs(lager.Logger, string, models.ActualLRPFilter) ([]*models.ActualLRP, error) + + // Returns all ActualLRPGroups matching the given ActualLRPFilter + //lint:ignore SA1019 - deprecated function returning deprecated data + // Deprecated: use ActualLRPs instead + ActualLRPGroups(lager.Logger, string, models.ActualLRPFilter) ([]*models.ActualLRPGroup, error) + + // Returns all ActualLRPGroups that have the given process guid + //lint:ignore SA1019 - deprecated function returning deprecated data + // Deprecated: use ActualLRPs instead + ActualLRPGroupsByProcessGuid(logger lager.Logger, traceID string, processGuid string) ([]*models.ActualLRPGroup, error) + + // Returns the ActualLRPGroup with the given process guid and instance index + //lint:ignore SA1019 - deprecated function returning deprecated data + // Deprecated: use ActualLRPs instead + ActualLRPGroupByProcessGuidAndIndex(logger lager.Logger, traceID string, processGuid string, index int) (*models.ActualLRPGroup, error) + + // Shuts down the ActualLRP matching the given ActualLRPKey, but does not modify the desired state + RetireActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey) error +} + +/* +The ExternalDesiredLRPClient is used to access and manipulate Desired LRPs. 
+*/ +type ExternalDesiredLRPClient interface { + // Lists all DesiredLRPs that match the given DesiredLRPFilter + DesiredLRPs(lager.Logger, string, models.DesiredLRPFilter) ([]*models.DesiredLRP, error) + + // Returns the DesiredLRP with the given process guid + DesiredLRPByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRP, error) + + // Returns all DesiredLRPSchedulingInfos that match the given DesiredLRPFilter + DesiredLRPSchedulingInfos(lager.Logger, string, models.DesiredLRPFilter) ([]*models.DesiredLRPSchedulingInfo, error) + + //Returns the DesiredLRPSchedulingInfo that matches the given process guid + DesiredLRPSchedulingInfoByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRPSchedulingInfo, error) + + // Returns all DesiredLRPRoutingInfos that match the given DesiredLRPFilter + DesiredLRPRoutingInfos(lager.Logger, string, models.DesiredLRPFilter) ([]*models.DesiredLRP, error) + + // Creates the given DesiredLRP and its corresponding ActualLRPs + DesireLRP(lager.Logger, string, *models.DesiredLRP) error + + // Updates the DesiredLRP matching the given process guid + UpdateDesiredLRP(logger lager.Logger, traceID string, processGuid string, update *models.DesiredLRPUpdate) error + + // Removes the DesiredLRP matching the given process guid + RemoveDesiredLRP(logger lager.Logger, traceID string, processGuid string) error +} + +/* +The ExternalEventClient is used to subscribe to groups of Events. +*/ +type ExternalEventClient interface { + // Deprecated: use SubscribeToInstanceEvents instead + SubscribeToEvents(logger lager.Logger) (events.EventSource, error) + + SubscribeToInstanceEvents(logger lager.Logger) (events.EventSource, error) + SubscribeToTaskEvents(logger lager.Logger) (events.EventSource, error) + + // Deprecated: use SubscribeToInstanceEventsByCellID instead + SubscribeToEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) + + SubscribeToInstanceEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) +} + +type ClientConfig struct { + URL string + IsTLS bool + CAFile string + CertFile string + KeyFile string + ClientSessionCacheSize int + MaxIdleConnsPerHost int + InsecureSkipVerify bool + Retries int + RetryInterval time.Duration // Only affects streaming client, not the http client + RequestTimeout time.Duration // Only affects the http client, not the streaming client +} + +func NewClient(url, caFile, certFile, keyFile string, clientSessionCacheSize, maxIdleConnsPerHost int) (InternalClient, error) { + return NewClientWithConfig(ClientConfig{ + URL: url, + IsTLS: true, + CAFile: caFile, + CertFile: certFile, + KeyFile: keyFile, + ClientSessionCacheSize: clientSessionCacheSize, + MaxIdleConnsPerHost: maxIdleConnsPerHost, + }) +} + +func NewSecureSkipVerifyClient(url, certFile, keyFile string, clientSessionCacheSize, maxIdleConnsPerHost int) (InternalClient, error) { + return NewClientWithConfig(ClientConfig{ + URL: url, + IsTLS: true, + CAFile: "", + CertFile: certFile, + KeyFile: keyFile, + ClientSessionCacheSize: clientSessionCacheSize, + MaxIdleConnsPerHost: maxIdleConnsPerHost, + InsecureSkipVerify: true, + }) +} + +func NewClientWithConfig(cfg ClientConfig) (InternalClient, error) { + if cfg.Retries == 0 { + cfg.Retries = DefaultRetryCount + } + + if cfg.RetryInterval == 0 { + cfg.RetryInterval = time.Second + } + + if cfg.InsecureSkipVerify { + cfg.CAFile = "" + } + + if cfg.IsTLS { + return newSecureClient(cfg) + } else { + return 
newClient(cfg), nil + } +} + +func newClient(cfg ClientConfig) *client { + return &client{ + httpClient: cfhttp.NewClient(cfhttp.WithRequestTimeout(cfg.RequestTimeout)), + streamingHTTPClient: cfhttp.NewClient(cfhttp.WithStreamingDefaults()), + reqGen: rata.NewRequestGenerator(cfg.URL, Routes), + requestRetryCount: cfg.Retries, + retryInterval: cfg.RetryInterval, + } +} +func newSecureClient(cfg ClientConfig) (InternalClient, error) { + bbsURL, err := url.Parse(cfg.URL) + if err != nil { + return nil, err + } + if bbsURL.Scheme != "https" { + return nil, errors.New("Expected https URL") + } + + var clientOpts []tlsconfig.ClientOption + if !cfg.InsecureSkipVerify { + clientOpts = append(clientOpts, tlsconfig.WithAuthorityFromFile(cfg.CAFile)) + } + + tlsConfig, err := tlsconfig.Build( + tlsconfig.WithInternalServiceDefaults(), + tlsconfig.WithIdentityFromFile(cfg.CertFile, cfg.KeyFile), + ).Client(clientOpts...) + if err != nil { + return nil, err + } + tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(cfg.ClientSessionCacheSize) + + tlsConfig.InsecureSkipVerify = cfg.InsecureSkipVerify + + httpClient := cfhttp.NewClient( + cfhttp.WithRequestTimeout(cfg.RequestTimeout), + cfhttp.WithTLSConfig(tlsConfig), + cfhttp.WithMaxIdleConnsPerHost(cfg.MaxIdleConnsPerHost), + ) + streamingClient := cfhttp.NewClient( + cfhttp.WithStreamingDefaults(), + cfhttp.WithTLSConfig(tlsConfig), + cfhttp.WithMaxIdleConnsPerHost(cfg.MaxIdleConnsPerHost), + ) + + return &client{ + httpClient: httpClient, + streamingHTTPClient: streamingClient, + reqGen: rata.NewRequestGenerator(cfg.URL, Routes), + requestRetryCount: cfg.Retries, + retryInterval: cfg.RetryInterval, + }, nil +} + +type client struct { + httpClient *http.Client + streamingHTTPClient *http.Client + reqGen *rata.RequestGenerator + requestRetryCount int + retryInterval time.Duration +} + +func (c *client) Ping(logger lager.Logger, traceID string) bool { + response := models.PingResponse{} + err := c.doRequest(logger, traceID, PingRoute_r0, nil, nil, nil, &response) + if err != nil { + return false + } + return response.Available +} + +func (c *client) Domains(logger lager.Logger, traceID string) ([]string, error) { + response := models.DomainsResponse{} + err := c.doRequest(logger, traceID, DomainsRoute_r0, nil, nil, nil, &response) + if err != nil { + return nil, err + } + return response.Domains, response.Error.ToError() +} + +func (c *client) UpsertDomain(logger lager.Logger, traceID string, domain string, ttl time.Duration) error { + request := models.UpsertDomainRequest{ + Domain: domain, + Ttl: uint32(ttl.Seconds()), + } + response := models.UpsertDomainResponse{} + err := c.doRequest(logger, traceID, UpsertDomainRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) ActualLRPs(logger lager.Logger, traceID string, filter models.ActualLRPFilter) ([]*models.ActualLRP, error) { + request := models.ActualLRPsRequest{ + Domain: filter.Domain, + CellId: filter.CellID, + ProcessGuid: filter.ProcessGuid, + } + if filter.Index != nil { + request.SetIndex(*filter.Index) + } + response := models.ActualLRPsResponse{} + err := c.doRequest(logger, traceID, ActualLRPsRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrps, response.Error.ToError() +} + +// Deprecated: use ActualLRPs instead +func (c *client) ActualLRPGroups(logger lager.Logger, traceID string, filter models.ActualLRPFilter) ([]*models.ActualLRPGroup, 
error) { + request := models.ActualLRPGroupsRequest{ + Domain: filter.Domain, + CellId: filter.CellID, + } + response := models.ActualLRPGroupsResponse{} + err := c.doRequest(logger, traceID, ActualLRPGroupsRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrpGroups, response.Error.ToError() +} + +// Deprecated: use ActaulLRPs instead +func (c *client) ActualLRPGroupsByProcessGuid(logger lager.Logger, traceID string, processGuid string) ([]*models.ActualLRPGroup, error) { + request := models.ActualLRPGroupsByProcessGuidRequest{ + ProcessGuid: processGuid, + } + response := models.ActualLRPGroupsResponse{} + err := c.doRequest(logger, traceID, ActualLRPGroupsByProcessGuidRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrpGroups, response.Error.ToError() +} + +// Deprecated: use ActaulLRPs instead +func (c *client) ActualLRPGroupByProcessGuidAndIndex(logger lager.Logger, traceID string, processGuid string, index int) (*models.ActualLRPGroup, error) { + request := models.ActualLRPGroupByProcessGuidAndIndexRequest{ + ProcessGuid: processGuid, + Index: int32(index), + } + response := models.ActualLRPGroupResponse{} + err := c.doRequest(logger, traceID, ActualLRPGroupByProcessGuidAndIndexRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.ActualLrpGroup, response.Error.ToError() +} + +func (c *client) ClaimActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error { + request := models.ClaimActualLRPRequest{ + ProcessGuid: key.ProcessGuid, + Index: key.Index, + ActualLrpInstanceKey: instanceKey, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, ClaimActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) StartActualLRP(logger lager.Logger, + traceID string, + key *models.ActualLRPKey, + instanceKey *models.ActualLRPInstanceKey, + netInfo *models.ActualLRPNetInfo, + internalRoutes []*models.ActualLRPInternalRoute, + metricTags map[string]string, + routable bool, + availabilityZone string, +) error { + response := models.ActualLRPLifecycleResponse{} + request := &models.StartActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + ActualLrpInternalRoutes: internalRoutes, + MetricTags: metricTags, + AvailabilityZone: availabilityZone, + } + request.SetRoutable(routable) + err := c.doRequest(logger, traceID, StartActualLRPRoute_r1, nil, nil, request, &response) + if err != nil && err == EndpointNotFoundErr { + err = c.doRequest(logger, traceID, StartActualLRPRoute_r0, nil, nil, &models.StartActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + }, &response) + } + + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) CrashActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) error { + request := models.CrashActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ErrorMessage: errorMessage, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, CrashActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + + } + return response.Error.ToError() +} + 
+func (c *client) FailActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, errorMessage string) error { + request := models.FailActualLRPRequest{ + ActualLrpKey: key, + ErrorMessage: errorMessage, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, FailActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + + } + return response.Error.ToError() +} + +func (c *client) RetireActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey) error { + request := models.RetireActualLRPRequest{ + ActualLrpKey: key, + } + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, RetireActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + + } + return response.Error.ToError() +} + +func (c *client) RemoveActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error { + request := models.RemoveActualLRPRequest{ + ProcessGuid: key.ProcessGuid, + Index: key.Index, + ActualLrpInstanceKey: instanceKey, + } + + response := models.ActualLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, RemoveActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) EvacuateClaimedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) { + return c.doEvacRequest(logger, traceID, EvacuateClaimedActualLRPRoute_r0, KeepContainer, &models.EvacuateClaimedActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + }) +} + +func (c *client) EvacuateCrashedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey, errorMessage string) (bool, error) { + return c.doEvacRequest(logger, traceID, EvacuateCrashedActualLRPRoute_r0, DeleteContainer, &models.EvacuateCrashedActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ErrorMessage: errorMessage, + }) +} + +func (c *client) EvacuateStoppedActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) (bool, error) { + return c.doEvacRequest(logger, traceID, EvacuateStoppedActualLRPRoute_r0, DeleteContainer, &models.EvacuateStoppedActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + }) +} + +func (c *client) EvacuateRunningActualLRP(logger lager.Logger, + traceID string, + key *models.ActualLRPKey, + instanceKey *models.ActualLRPInstanceKey, + netInfo *models.ActualLRPNetInfo, + internalRoutes []*models.ActualLRPInternalRoute, + metricTags map[string]string, + routable bool, + availabilityZone string, +) (bool, error) { + request := &models.EvacuateRunningActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + ActualLrpInternalRoutes: internalRoutes, + MetricTags: metricTags, + AvailabilityZone: availabilityZone, + } + request.SetRoutable(routable) + keepContainer, err := c.doEvacRequest(logger, traceID, EvacuateRunningActualLRPRoute_r1, KeepContainer, request) + if err != nil && err == EndpointNotFoundErr { + keepContainer, err = c.doEvacRequest(logger, traceID, EvacuateRunningActualLRPRoute_r0, KeepContainer, &models.EvacuateRunningActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + ActualLrpNetInfo: netInfo, + }) + } + + return keepContainer, err +} + +func (c 
*client) RemoveEvacuatingActualLRP(logger lager.Logger, traceID string, key *models.ActualLRPKey, instanceKey *models.ActualLRPInstanceKey) error { + request := models.RemoveEvacuatingActualLRPRequest{ + ActualLrpKey: key, + ActualLrpInstanceKey: instanceKey, + } + + response := models.RemoveEvacuatingActualLRPResponse{} + err := c.doRequest(logger, traceID, RemoveEvacuatingActualLRPRoute_r0, nil, nil, &request, &response) + if err != nil { + return err + } + + return response.Error.ToError() +} + +func (c *client) DesiredLRPs(logger lager.Logger, traceID string, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) { + request := models.DesiredLRPsRequest(filter) + response := models.DesiredLRPsResponse{} + err := c.doRequest(logger, traceID, DesiredLRPsRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrps, response.Error.ToError() +} + +func (c *client) DesiredLRPByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRP, error) { + request := models.DesiredLRPByProcessGuidRequest{ + ProcessGuid: processGuid, + } + response := models.DesiredLRPResponse{} + err := c.doRequest(logger, traceID, DesiredLRPByProcessGuidRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrp, response.Error.ToError() +} + +func (c *client) DesiredLRPSchedulingInfos(logger lager.Logger, traceID string, filter models.DesiredLRPFilter) ([]*models.DesiredLRPSchedulingInfo, error) { + request := models.DesiredLRPsRequest(filter) + response := models.DesiredLRPSchedulingInfosResponse{} + err := c.doRequest(logger, traceID, DesiredLRPSchedulingInfosRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrpSchedulingInfos, response.Error.ToError() +} + +func (c *client) DesiredLRPSchedulingInfoByProcessGuid(logger lager.Logger, traceID string, processGuid string) (*models.DesiredLRPSchedulingInfo, error) { + request := models.DesiredLRPByProcessGuidRequest{ + ProcessGuid: processGuid, + } + response := models.DesiredLRPSchedulingInfoByProcessGuidResponse{} + err := c.doRequest(logger, traceID, DesiredLRPSchedulingInfoByProcessGuid_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrpSchedulingInfo, response.Error.ToError() +} + +func (c *client) DesiredLRPRoutingInfos(logger lager.Logger, traceID string, filter models.DesiredLRPFilter) ([]*models.DesiredLRP, error) { + request := models.DesiredLRPsRequest(filter) + response := models.DesiredLRPsResponse{} + err := c.doRequest(logger, traceID, DesiredLRPRoutingInfosRoute_r0, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.DesiredLrps, response.Error.ToError() +} + +func (c *client) doDesiredLRPLifecycleRequest(logger lager.Logger, traceID string, route string, request proto.Message) error { + response := models.DesiredLRPLifecycleResponse{} + err := c.doRequest(logger, traceID, route, nil, nil, request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) DesireLRP(logger lager.Logger, traceID string, desiredLRP *models.DesiredLRP) error { + request := models.DesireLRPRequest{ + DesiredLrp: desiredLRP, + } + return c.doDesiredLRPLifecycleRequest(logger, traceID, DesireDesiredLRPRoute_r2, &request) +} + +func (c *client) UpdateDesiredLRP(logger lager.Logger, traceID string, processGuid string, update 
*models.DesiredLRPUpdate) error { + request := models.UpdateDesiredLRPRequest{ + ProcessGuid: processGuid, + Update: update, + } + return c.doDesiredLRPLifecycleRequest(logger, traceID, UpdateDesiredLRPRoute_r0, &request) +} + +func (c *client) RemoveDesiredLRP(logger lager.Logger, traceID string, processGuid string) error { + request := models.RemoveDesiredLRPRequest{ + ProcessGuid: processGuid, + } + return c.doDesiredLRPLifecycleRequest(logger, traceID, RemoveDesiredLRPRoute_r0, &request) +} + +func (c *client) Tasks(logger lager.Logger, traceID string) ([]*models.Task, error) { + request := models.TasksRequest{} + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Tasks, response.Error.ToError() +} + +func (c *client) TasksWithFilter(logger lager.Logger, traceID string, filter models.TaskFilter) ([]*models.Task, error) { + request := models.TasksRequest{ + Domain: filter.Domain, + CellId: filter.CellID, + } + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + return response.Tasks, response.Error.ToError() +} + +func (c *client) TasksByDomain(logger lager.Logger, traceID string, domain string) ([]*models.Task, error) { + request := models.TasksRequest{ + Domain: domain, + } + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Tasks, response.Error.ToError() +} + +func (c *client) TasksByCellID(logger lager.Logger, traceID string, cellId string) ([]*models.Task, error) { + request := models.TasksRequest{ + CellId: cellId, + } + response := models.TasksResponse{} + err := c.doRequest(logger, traceID, TasksRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Tasks, response.Error.ToError() +} + +func (c *client) TaskByGuid(logger lager.Logger, traceID string, taskGuid string) (*models.Task, error) { + request := models.TaskByGuidRequest{ + TaskGuid: taskGuid, + } + response := models.TaskResponse{} + err := c.doRequest(logger, traceID, TaskByGuidRoute_r3, nil, nil, &request, &response) + if err != nil { + return nil, err + } + + return response.Task, response.Error.ToError() +} + +func (c *client) doTaskLifecycleRequest(logger lager.Logger, traceID string, route string, request proto.Message) error { + response := models.TaskLifecycleResponse{} + err := c.doRequest(logger, traceID, route, nil, nil, request, &response) + if err != nil { + return err + } + return response.Error.ToError() +} + +func (c *client) DesireTask(logger lager.Logger, traceID string, taskGuid, domain string, taskDef *models.TaskDefinition) error { + route := DesireTaskRoute_r2 + request := models.DesireTaskRequest{ + TaskGuid: taskGuid, + Domain: domain, + TaskDefinition: taskDef, + } + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) StartTask(logger lager.Logger, traceID string, taskGuid string, cellId string) (bool, error) { + request := &models.StartTaskRequest{ + TaskGuid: taskGuid, + CellId: cellId, + } + response := &models.StartTaskResponse{} + err := c.doRequest(logger, traceID, StartTaskRoute_r0, nil, nil, request, response) + if err != nil { + return false, err + } + return response.ShouldStart, response.Error.ToError() +} + +func (c *client) CancelTask(logger 
lager.Logger, traceID string, taskGuid string) error { + request := models.TaskGuidRequest{ + TaskGuid: taskGuid, + } + route := CancelTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) ResolvingTask(logger lager.Logger, traceID string, taskGuid string) error { + request := models.TaskGuidRequest{ + TaskGuid: taskGuid, + } + route := ResolvingTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) DeleteTask(logger lager.Logger, traceID string, taskGuid string) error { + request := models.TaskGuidRequest{ + TaskGuid: taskGuid, + } + route := DeleteTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +// Deprecated: use CancelTask instead +func (c *client) FailTask(logger lager.Logger, traceID string, taskGuid string, failureReason string) error { + request := models.FailTaskRequest{ + TaskGuid: taskGuid, + FailureReason: failureReason, + } + route := FailTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) RejectTask(logger lager.Logger, traceID string, taskGuid string, rejectionReason string) error { + request := models.RejectTaskRequest{ + TaskGuid: taskGuid, + RejectionReason: rejectionReason, + } + route := RejectTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) CompleteTask(logger lager.Logger, traceID string, taskGuid string, cellId string, failed bool, failureReason, result string) error { + request := models.CompleteTaskRequest{ + TaskGuid: taskGuid, + CellId: cellId, + Failed: failed, + FailureReason: failureReason, + Result: result, + } + route := CompleteTaskRoute_r0 + return c.doTaskLifecycleRequest(logger, traceID, route, &request) +} + +func (c *client) subscribeToEvents(route string, cellId string) (events.EventSource, error) { + request := models.EventsByCellId{ + CellId: cellId, + } + messageBody, err := proto.Marshal(&request) + if err != nil { + return nil, err + } + + sseConfig := sse.Config{ + Client: c.streamingHTTPClient, + RetryParams: sse.RetryParams{ + RetryInterval: c.retryInterval, + MaxRetries: uint16(c.requestRetryCount), + }, + RequestCreator: func() *http.Request { + request, err := c.reqGen.CreateRequest(route, nil, bytes.NewReader(messageBody)) + if err != nil { + panic(err) // totally shouldn't happen + } + + return request + }, + } + + eventSource, err := sseConfig.Connect() + if err != nil { + return nil, err + } + + return events.NewEventSource(eventSource), nil +} + +// Deprecated: use SubscribeToInstanceEvents instead +func (c *client) SubscribeToEvents(logger lager.Logger) (events.EventSource, error) { + return c.subscribeToEvents(LRPGroupEventStreamRoute_r1, "") +} + +func (c *client) SubscribeToInstanceEvents(logger lager.Logger) (events.EventSource, error) { + return c.subscribeToEvents(LRPInstanceEventStreamRoute_r1, "") +} + +func (c *client) SubscribeToTaskEvents(logger lager.Logger) (events.EventSource, error) { + return c.subscribeToEvents(TaskEventStreamRoute_r1, "") +} + +// Deprecated: use SubscribeToInstanceEventsByCellID instead +func (c *client) SubscribeToEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) { + return c.subscribeToEvents(LRPGroupEventStreamRoute_r1, cellId) +} + +func (c *client) SubscribeToInstanceEventsByCellID(logger lager.Logger, cellId string) (events.EventSource, error) { + return c.subscribeToEvents(LRPInstanceEventStreamRoute_r1, cellId) +} + +func 
(c *client) Cells(logger lager.Logger, traceID string) ([]*models.CellPresence, error) { + response := models.CellsResponse{} + err := c.doRequest(logger, traceID, CellsRoute_r0, nil, nil, nil, &response) + if err != nil { + return nil, err + } + return response.Cells, response.Error.ToError() +} + +func (c *client) createRequest(traceID string, requestName string, params rata.Params, queryParams url.Values, message proto.Message) (*http.Request, error) { + var messageBody []byte + var err error + if message != nil { + messageBody, err = proto.Marshal(message) + if err != nil { + return nil, err + } + } + + request, err := c.reqGen.CreateRequest(requestName, params, bytes.NewReader(messageBody)) + if err != nil { + return nil, err + } + + request.URL.RawQuery = queryParams.Encode() + request.ContentLength = int64(len(messageBody)) + request.Header.Set("Content-Type", ProtoContentType) + request.Header.Set(trace.RequestIdHeader, traceID) + return request, nil +} + +func (c *client) doEvacRequest(logger lager.Logger, traceID string, route string, defaultKeepContainer bool, request proto.Message) (bool, error) { + var response models.EvacuationResponse + err := c.doRequest(logger, traceID, route, nil, nil, request, &response) + if err != nil { + return defaultKeepContainer, err + } + + return response.KeepContainer, response.Error.ToError() +} + +func (c *client) doRequest(logger lager.Logger, traceID string, requestName string, params rata.Params, queryParams url.Values, requestBody, responseBody proto.Message) error { + logger = logger.Session("do-request") + var err error + var request *http.Request + + for attempts := 0; attempts < c.requestRetryCount; attempts++ { + logger.Debug("creating-request", lager.Data{"attempt": attempts + 1, "request_name": requestName}) + request, err = c.createRequest(traceID, requestName, params, queryParams, requestBody) + if err != nil { + logger.Error("failed-creating-request", err) + return err + } + + logger.Debug("doing-request", lager.Data{"attempt": attempts + 1, "request_path": request.URL.Path}) + + start := time.Now().UnixNano() + err = c.do(request, responseBody) + finish := time.Now().UnixNano() + + if err != nil { + logger.Error("failed-doing-request", err) + if netErr, ok := err.(net.Error); ok { + if netErr.Timeout() { + err = models.NewError(models.Error_Timeout, err.Error()) + } + } + time.Sleep(500 * time.Millisecond) + } else { + logger.Debug("complete", lager.Data{"request_path": request.URL.Path, "duration_in_ns": finish - start}) + break + } + } + return err +} + +func (c *client) do(request *http.Request, responseObject proto.Message) error { + response, err := c.httpClient.Do(request) + if err != nil { + return err + } + defer func() { + // don't worry about errors when closing the body + _ = response.Body.Close() + }() + + var parsedContentType string + if contentType, ok := response.Header[ContentTypeHeader]; ok { + parsedContentType, _, _ = mime.ParseMediaType(contentType[0]) + } + + if routerError, ok := response.Header[XCfRouterErrorHeader]; ok { + return models.NewError(models.Error_RouterError, routerError[0]) + } + + if parsedContentType == ProtoContentType { + return handleProtoResponse(response, responseObject) + } else { + return handleNonProtoResponse(response) + } +} + +func handleProtoResponse(response *http.Response, responseObject proto.Message) error { + if responseObject == nil { + return models.NewError(models.Error_InvalidRequest, "responseObject cannot be nil") + } + + buf, err := io.ReadAll(response.Body) + if 
err != nil { + return models.NewError(models.Error_InvalidResponse, fmt.Sprint("failed to read body: ", err.Error())) + } + + err = proto.Unmarshal(buf, responseObject) + if err != nil { + return models.NewError(models.Error_InvalidProtobufMessage, fmt.Sprint("failed to unmarshal proto: ", err.Error())) + } + + return nil +} + +func handleNonProtoResponse(response *http.Response) error { + if response.StatusCode == 404 { + return EndpointNotFoundErr + } + + if response.StatusCode > 299 { + return models.NewError(models.Error_InvalidResponse, fmt.Sprintf(InvalidResponseMessage, response.StatusCode)) + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/crypt.go b/vendor/code.cloudfoundry.org/bbs/encryption/crypt.go new file mode 100644 index 00000000..bdc66af6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/crypt.go @@ -0,0 +1,76 @@ +package encryption + +import ( + "crypto/cipher" + "fmt" + "io" +) + +const NonceSize = 12 + +type Encrypted struct { + Nonce []byte + KeyLabel string + CipherText []byte +} + +type Encryptor interface { + Encrypt(plaintext []byte) (Encrypted, error) +} + +type Decryptor interface { + Decrypt(encrypted Encrypted) ([]byte, error) +} + +//go:generate counterfeiter -generate + +//counterfeiter:generate . Cryptor + +type Cryptor interface { + Encryptor + Decryptor +} + +type cryptor struct { + keyManager KeyManager + prng io.Reader +} + +func NewCryptor(keyManager KeyManager, prng io.Reader) Cryptor { + return &cryptor{ + keyManager: keyManager, + prng: prng, + } +} + +func (c *cryptor) Encrypt(plaintext []byte) (Encrypted, error) { + key := c.keyManager.EncryptionKey() + + aead, err := cipher.NewGCM(key.Block()) + if err != nil { + return Encrypted{}, fmt.Errorf("Unable to create GCM-wrapped cipher: %q", err) + } + + nonce := make([]byte, aead.NonceSize()) + _, err = io.ReadFull(c.prng, nonce) + if err != nil { + return Encrypted{}, fmt.Errorf("Unable to generate random nonce: %q", err) + } + + ciphertext := aead.Seal(nil, nonce, plaintext, nil) + return Encrypted{KeyLabel: key.Label(), Nonce: nonce, CipherText: ciphertext}, nil +} + +func (d *cryptor) Decrypt(encrypted Encrypted) ([]byte, error) { + key := d.keyManager.DecryptionKey(encrypted.KeyLabel) + if key == nil { + return nil, fmt.Errorf("Key with label %q was not found", encrypted.KeyLabel) + } + + aead, err := cipher.NewGCM(key.Block()) + if err != nil { + return nil, fmt.Errorf("Unable to create GCM-wrapped cipher: %q", err) + } + + return aead.Open(nil, encrypted.Nonce, encrypted.CipherText, nil) +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go b/vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go new file mode 100644 index 00000000..8e8c883a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/encryption_config.go @@ -0,0 +1,42 @@ +package encryption + +import "errors" + +type EncryptionConfig struct { + ActiveKeyLabel string `json:"active_key_label"` + EncryptionKeys map[string]string `json:"encryption_keys"` +} + +func (ef *EncryptionConfig) Parse() (Key, []Key, error) { + if len(ef.EncryptionKeys) == 0 { + return nil, nil, errors.New("Must have at least one encryption key set") + } + + if len(ef.ActiveKeyLabel) == 0 { + return nil, nil, errors.New("Must select an active encryption key") + } + + var encryptionKey Key + + labelsToKeys := map[string]Key{} + + for label, phrase := range ef.EncryptionKeys { + key, err := NewKey(label, phrase) + if err != nil { + return nil, nil, err + } + labelsToKeys[label] = 
key + } + + encryptionKey, ok := labelsToKeys[ef.ActiveKeyLabel] + if !ok { + return nil, nil, errors.New("The selected active key must be listed on the encryption keys flag") + } + + keys := []Key{} + for _, v := range labelsToKeys { + keys = append(keys, v) + } + + return encryptionKey, keys, nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/key.go b/vendor/code.cloudfoundry.org/bbs/encryption/key.go new file mode 100644 index 00000000..715a6438 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/key.go @@ -0,0 +1,49 @@ +package encryption + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/sha256" + "errors" +) + +//counterfeiter:generate . Key + +type Key interface { + Label() string + Block() cipher.Block +} + +type key struct { + block cipher.Block + label string +} + +func NewKey(label, phrase string) (Key, error) { + if label == "" { + return nil, errors.New("A key label is required") + } + + if len(label) > 127 { + return nil, errors.New("Key label is longer than 127 bytes") + } + + hash := sha256.Sum256([]byte(phrase)) + block, err := aes.NewCipher(hash[:]) + if err != nil { + return nil, err + } + + return &key{ + label: label, + block: block, + }, nil +} + +func (k *key) Label() string { + return k.label +} + +func (k *key) Block() cipher.Block { + return k.block +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go b/vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go new file mode 100644 index 00000000..441cc28c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/key_manager.go @@ -0,0 +1,39 @@ +package encryption + +import "fmt" + +type keyManager struct { + encryptionKey Key + decryptionKeys map[string]Key +} + +type KeyManager interface { + EncryptionKey() Key + DecryptionKey(label string) Key +} + +func NewKeyManager(encryptionKey Key, decryptionKeys []Key) (KeyManager, error) { + decryptionKeyMap := map[string]Key{ + encryptionKey.Label(): encryptionKey, + } + + for _, key := range decryptionKeys { + if existingKey, ok := decryptionKeyMap[key.Label()]; ok && key != existingKey { + return nil, fmt.Errorf("Multiple keys with the same label: %q", key.Label()) + } + decryptionKeyMap[key.Label()] = key + } + + return &keyManager{ + encryptionKey: encryptionKey, + decryptionKeys: decryptionKeyMap, + }, nil +} + +func (m *keyManager) EncryptionKey() Key { + return m.encryptionKey +} + +func (m *keyManager) DecryptionKey(label string) Key { + return m.decryptionKeys[label] +} diff --git a/vendor/code.cloudfoundry.org/bbs/encryption/package.go b/vendor/code.cloudfoundry.org/bbs/encryption/package.go new file mode 100644 index 00000000..2aaa08df --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/encryption/package.go @@ -0,0 +1 @@ +package encryption // import "code.cloudfoundry.org/bbs/encryption" diff --git a/vendor/code.cloudfoundry.org/bbs/events/event_source.go b/vendor/code.cloudfoundry.org/bbs/events/event_source.go new file mode 100644 index 00000000..a8271690 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/events/event_source.go @@ -0,0 +1,269 @@ +package events + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "strconv" + + "code.cloudfoundry.org/bbs/models" + "github.com/gogo/protobuf/proto" + "github.com/vito/go-sse/sse" +) + +var ( + ErrUnrecognizedEventType = errors.New("unrecognized event type") + ErrSourceClosed = errors.New("source closed") + ErrNoData = errors.New("event with no data") +) + +type invalidPayloadError struct { + payloadType string + protoErr error +} + +func 
NewInvalidPayloadError(payloadType string, protoErr error) error { + return invalidPayloadError{payloadType: payloadType, protoErr: protoErr} +} + +func (e invalidPayloadError) Error() string { + return fmt.Sprintf("invalid protobuf payload of type %s: %s", e.payloadType, e.protoErr.Error()) +} + +type rawEventSourceError struct { + rawError error +} + +func NewRawEventSourceError(rawError error) error { + return rawEventSourceError{rawError: rawError} +} + +func (e rawEventSourceError) Error() string { + return fmt.Sprintf("raw event source error: %s", e.rawError.Error()) +} + +type closeError struct { + err error +} + +func NewCloseError(err error) error { + return closeError{err: err} +} + +func (e closeError) Error() string { + return fmt.Sprintf("error closing raw source: %s", e.err.Error()) +} + +func NewEventFromModelEvent(eventID int, event models.Event) (sse.Event, error) { + payload, err := proto.Marshal(event) + if err != nil { + return sse.Event{}, err + } + + encodedPayload := base64.StdEncoding.EncodeToString(payload) + return sse.Event{ + ID: strconv.Itoa(eventID), + Name: string(event.EventType()), + Data: []byte(encodedPayload), + }, nil +} + +//go:generate counterfeiter -generate + +//counterfeiter:generate -o eventfakes/fake_event_source.go . EventSource + +// EventSource provides sequential access to a stream of events. +type EventSource interface { + // Next reads the next event from the source. If the connection is lost, it + // automatically reconnects. + // + // If the end of the stream is reached cleanly (which should actually never + // happen), io.EOF is returned. If called after or during Close, + // ErrSourceClosed is returned. + Next() (models.Event, error) + + // Close releases the underlying response, interrupts any in-flight Next, and + // prevents further calls to Next. + Close() error +} + +//counterfeiter:generate -o eventfakes/fake_raw_event_source.go . 
RawEventSource + +type RawEventSource interface { + Next() (sse.Event, error) + Close() error +} + +type eventSource struct { + rawEventSource RawEventSource +} + +func NewEventSource(raw RawEventSource) EventSource { + return &eventSource{ + rawEventSource: raw, + } +} + +func (e *eventSource) Next() (models.Event, error) { + rawEvent, err := e.rawEventSource.Next() + if err != nil { + switch err { + case io.EOF: + return nil, err + + case sse.ErrSourceClosed: + return nil, ErrSourceClosed + + default: + return nil, NewRawEventSourceError(err) + } + } + + return parseRawEvent(rawEvent) +} + +func (e *eventSource) Close() error { + err := e.rawEventSource.Close() + if err != nil { + return NewCloseError(err) + } + + return nil +} + +func parseRawEvent(rawEvent sse.Event) (models.Event, error) { + data, err := base64.StdEncoding.DecodeString(string(rawEvent.Data)) + if len(data) == 0 { + return nil, NewInvalidPayloadError(rawEvent.Name, ErrNoData) + } else if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + switch rawEvent.Name { + case models.EventTypeDesiredLRPCreated: + event := new(models.DesiredLRPCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeDesiredLRPChanged: + event := new(models.DesiredLRPChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeDesiredLRPRemoved: + event := new(models.DesiredLRPRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + case models.EventTypeActualLRPCreated: + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + event := new(models.ActualLRPCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + case models.EventTypeActualLRPChanged: + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + event := new(models.ActualLRPChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + case models.EventTypeActualLRPRemoved: + //lint:ignore SA1019 - need to support this event until the deprecation becomes deletion + event := new(models.ActualLRPRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPCrashed: + event := new(models.ActualLRPCrashedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeTaskCreated: + event := new(models.TaskCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeTaskChanged: + event := new(models.TaskChangedEvent) + err := proto.Unmarshal(data, event) + if err 
!= nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeTaskRemoved: + event := new(models.TaskRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPInstanceCreated: + event := new(models.ActualLRPInstanceCreatedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPInstanceChanged: + event := new(models.ActualLRPInstanceChangedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + + case models.EventTypeActualLRPInstanceRemoved: + event := new(models.ActualLRPInstanceRemovedEvent) + err := proto.Unmarshal(data, event) + if err != nil { + return nil, NewInvalidPayloadError(rawEvent.Name, err) + } + + return event, nil + } + + return nil, ErrUnrecognizedEventType +} diff --git a/vendor/code.cloudfoundry.org/bbs/events/hub.go b/vendor/code.cloudfoundry.org/bbs/events/hub.go new file mode 100644 index 00000000..0950825d --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/events/hub.go @@ -0,0 +1,200 @@ +package events + +import ( + "errors" + "sync" + + "code.cloudfoundry.org/bbs/models" + "code.cloudfoundry.org/lager/v3" +) + +const MAX_PENDING_SUBSCRIBER_EVENTS = 1024 + +var ErrReadFromClosedSource = errors.New("read from closed source") +var ErrSendToClosedSource = errors.New("send to closed source") +var ErrSourceAlreadyClosed = errors.New("source already closed") +var ErrSlowConsumer = errors.New("slow consumer") + +var ErrSubscribedToClosedHub = errors.New("subscribed to closed hub") +var ErrHubAlreadyClosed = errors.New("hub already closed") + +//counterfeiter:generate -o eventfakes/fake_hub.go . 
Hub +type Hub interface { + Subscribe() (EventSource, error) + Emit(models.Event) + Close() error + + RegisterCallback(func(count int)) + UnregisterCallback() +} + +type hub struct { + subscribers map[*hubSource]struct{} + closed bool + lock sync.Mutex + logger lager.Logger + + cb func(count int) +} + +func NewHub(logger lager.Logger) Hub { + return &hub{ + subscribers: make(map[*hubSource]struct{}), + logger: logger, + } +} + +func (hub *hub) RegisterCallback(cb func(int)) { + hub.lock.Lock() + hub.cb = cb + size := len(hub.subscribers) + hub.lock.Unlock() + if cb != nil { + cb(size) + } +} + +func (hub *hub) UnregisterCallback() { + hub.lock.Lock() + hub.cb = nil + hub.lock.Unlock() +} + +func (hub *hub) Subscribe() (EventSource, error) { + hub.lock.Lock() + + if hub.closed { + hub.lock.Unlock() + + return nil, ErrSubscribedToClosedHub + } + + sub := newSource(MAX_PENDING_SUBSCRIBER_EVENTS, hub.subscriberClosed) + hub.subscribers[sub] = struct{}{} + cb := hub.cb + size := len(hub.subscribers) + hub.lock.Unlock() + + if cb != nil { + cb(size) + } + return sub, nil +} + +func (hub *hub) Emit(event models.Event) { + hub.lock.Lock() + size := len(hub.subscribers) + + for sub := range hub.subscribers { + err := sub.send(event) + if err != nil { + hub.logger.Error("got-error-sending-event", err) + delete(hub.subscribers, sub) + } + } + + var cb func(int) + if len(hub.subscribers) != size { + cb = hub.cb + size = len(hub.subscribers) + } + hub.lock.Unlock() + + if cb != nil { + cb(size) + } +} + +func (hub *hub) Close() error { + hub.lock.Lock() + defer hub.lock.Unlock() + + if hub.closed { + return ErrHubAlreadyClosed + } + + hub.closeSubscribers() + hub.closed = true + if hub.cb != nil { + hub.cb(0) + } + return nil +} + +func (hub *hub) closeSubscribers() { + for sub := range hub.subscribers { + _ = sub.Close() + } + hub.subscribers = nil +} + +func (hub *hub) subscriberClosed(source *hubSource) { + hub.lock.Lock() + delete(hub.subscribers, source) + cb := hub.cb + count := len(hub.subscribers) + hub.lock.Unlock() + + if cb != nil { + cb(count) + } +} + +type hubSource struct { + events chan models.Event + closeCallback func(*hubSource) + closed bool + lock sync.Mutex +} + +func newSource(maxPendingEvents int, closeCallback func(*hubSource)) *hubSource { + return &hubSource{ + events: make(chan models.Event, maxPendingEvents), + closeCallback: closeCallback, + } +} + +func (source *hubSource) Next() (models.Event, error) { + event, ok := <-source.events + if !ok { + return nil, ErrReadFromClosedSource + } + return event, nil +} + +func (source *hubSource) Close() error { + source.lock.Lock() + defer source.lock.Unlock() + + if source.closed { + return ErrSourceAlreadyClosed + } + close(source.events) + source.closed = true + go source.closeCallback(source) + return nil +} + +func (source *hubSource) send(event models.Event) error { + source.lock.Lock() + + if source.closed { + source.lock.Unlock() + return ErrSendToClosedSource + } + + select { + case source.events <- event: + source.lock.Unlock() + return nil + + default: + source.lock.Unlock() + err := source.Close() + if err != nil { + return err + } + + return ErrSlowConsumer + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/events/package.go b/vendor/code.cloudfoundry.org/bbs/events/package.go new file mode 100644 index 00000000..b59b892f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/events/package.go @@ -0,0 +1 @@ +package events // import "code.cloudfoundry.org/bbs/events" diff --git 
a/vendor/code.cloudfoundry.org/bbs/format/encoding.go b/vendor/code.cloudfoundry.org/bbs/format/encoding.go new file mode 100644 index 00000000..37776147 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/encoding.go @@ -0,0 +1,102 @@ +package format + +import ( + "encoding/base64" + "fmt" + + "code.cloudfoundry.org/bbs/encryption" +) + +type Encoding [EncodingOffset]byte + +var ( + BASE64_ENCRYPTED Encoding = [2]byte{'0', '2'} +) + +const EncodingOffset int = 2 + +type encoder struct { + cryptor encryption.Cryptor +} + +type Encoder interface { + Encode(payload []byte) ([]byte, error) + Decode(payload []byte) ([]byte, error) +} + +func NewEncoder(cryptor encryption.Cryptor) Encoder { + return &encoder{cryptor: cryptor} +} + +func (e *encoder) Encode(payload []byte) ([]byte, error) { + encrypted, err := e.encrypt(payload) + if err != nil { + return nil, err + } + encoded := encodeBase64(encrypted) + return append(BASE64_ENCRYPTED[:], encoded...), nil +} + +func (e *encoder) Decode(payload []byte) ([]byte, error) { + encoding := encodingFromPayload(payload) + switch encoding { + case BASE64_ENCRYPTED: + encrypted, err := decodeBase64(payload[EncodingOffset:]) + if err != nil { + return nil, err + } + return e.decrypt(encrypted) + default: + return nil, fmt.Errorf("Unknown encoding: %v", encoding) + } +} + +func (e *encoder) encrypt(cleartext []byte) ([]byte, error) { + encrypted, err := e.cryptor.Encrypt(cleartext) + if err != nil { + return nil, err + } + + payload := []byte{} + payload = append(payload, byte(len(encrypted.KeyLabel))) + payload = append(payload, []byte(encrypted.KeyLabel)...) + payload = append(payload, encrypted.Nonce...) + payload = append(payload, encrypted.CipherText...) + + return payload, nil +} + +func (e *encoder) decrypt(encryptedData []byte) ([]byte, error) { + labelLength := encryptedData[0] + encryptedData = encryptedData[1:] + + label := string(encryptedData[:labelLength]) + encryptedData = encryptedData[labelLength:] + + nonce := encryptedData[:encryption.NonceSize] + ciphertext := encryptedData[encryption.NonceSize:] + + return e.cryptor.Decrypt(encryption.Encrypted{ + KeyLabel: label, + Nonce: nonce, + CipherText: ciphertext, + }) +} + +func encodeBase64(unencodedPayload []byte) []byte { + encodedLen := base64.StdEncoding.EncodedLen(len(unencodedPayload)) + encodedPayload := make([]byte, encodedLen) + base64.StdEncoding.Encode(encodedPayload, unencodedPayload) + return encodedPayload +} + +func decodeBase64(encodedPayload []byte) ([]byte, error) { + decodedLen := base64.StdEncoding.DecodedLen(len(encodedPayload)) + decodedPayload := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(decodedPayload, encodedPayload) + return decodedPayload[:n], err +} + +func encodingFromPayload(payload []byte) Encoding { + return Encoding{payload[0], payload[1]} +} diff --git a/vendor/code.cloudfoundry.org/bbs/format/envelope.go b/vendor/code.cloudfoundry.org/bbs/format/envelope.go new file mode 100644 index 00000000..dcd306d0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/envelope.go @@ -0,0 +1,61 @@ +package format + +import ( + "code.cloudfoundry.org/lager/v3" + "github.com/gogo/protobuf/proto" +) + +type EnvelopeFormat byte + +const ( + PROTO EnvelopeFormat = 2 +) + +const EnvelopeOffset int = 2 + +func UnmarshalEnvelope(logger lager.Logger, unencodedPayload []byte, model Model) error { + return UnmarshalProto(logger, unencodedPayload[EnvelopeOffset:], model) +} + +// dummy version for backward compatability. 
old BBS used to serialize proto +// messages with a 2-byte header that has the envelope format (i.e. PROTO) and +// the version of the model (e.g. 0, 1 or 2). Adding the version was a +// pre-mature optimization that we decided to get rid of in #133215113. That +// said, we have the ensure the header is a 2-byte to avoid breaking older BBS +// Deprecated: do not use, see note above +const version = 0 + +func MarshalEnvelope(model Model) ([]byte, error) { + var payload []byte + var err error + + payload, err = MarshalProto(model) + + if err != nil { + return nil, err + } + + data := make([]byte, 0, len(payload)+EnvelopeOffset) + data = append(data, byte(PROTO), byte(version)) + data = append(data, payload...) + + return data, nil +} + +func UnmarshalProto(logger lager.Logger, marshaledPayload []byte, model Model) error { + err := proto.Unmarshal(marshaledPayload, model) + if err != nil { + logger.Error("failed-to-proto-unmarshal-payload", err) + return err + } + return nil +} + +func MarshalProto(v Model) ([]byte, error) { + bytes, err := proto.Marshal(v) + if err != nil { + return nil, err + } + + return bytes, nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/format/format.go b/vendor/code.cloudfoundry.org/bbs/format/format.go new file mode 100644 index 00000000..b1f384ff --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/format.go @@ -0,0 +1,38 @@ +package format + +import ( + "code.cloudfoundry.org/bbs/encryption" + "code.cloudfoundry.org/lager/v3" +) + +type serializer struct { + encoder Encoder +} + +type Serializer interface { + Marshal(logger lager.Logger, model Model) ([]byte, error) + Unmarshal(logger lager.Logger, encodedPayload []byte, model Model) error +} + +func NewSerializer(cryptor encryption.Cryptor) Serializer { + return &serializer{ + encoder: NewEncoder(cryptor), + } +} + +func (s *serializer) Marshal(logger lager.Logger, model Model) ([]byte, error) { + envelopedPayload, err := MarshalEnvelope(model) + if err != nil { + return nil, err + } + + return s.encoder.Encode(envelopedPayload) +} + +func (s *serializer) Unmarshal(logger lager.Logger, encodedPayload []byte, model Model) error { + unencodedPayload, err := s.encoder.Decode(encodedPayload) + if err != nil { + return err + } + return UnmarshalEnvelope(logger, unencodedPayload, model) +} diff --git a/vendor/code.cloudfoundry.org/bbs/format/package.go b/vendor/code.cloudfoundry.org/bbs/format/package.go new file mode 100644 index 00000000..d9000165 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/package.go @@ -0,0 +1 @@ +package format // import "code.cloudfoundry.org/bbs/format" diff --git a/vendor/code.cloudfoundry.org/bbs/format/versioner.go b/vendor/code.cloudfoundry.org/bbs/format/versioner.go new file mode 100644 index 00000000..e4c3feb2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/format/versioner.go @@ -0,0 +1,16 @@ +package format + +import "github.com/gogo/protobuf/proto" + +type Version byte + +const ( + V0 Version = 0 + V1 Version = 1 + V2 Version = 2 + V3 Version = 3 +) + +type Model interface { + proto.Message +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actions.go b/vendor/code.cloudfoundry.org/bbs/models/actions.go new file mode 100644 index 00000000..edd14a41 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actions.go @@ -0,0 +1,592 @@ +package models + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "code.cloudfoundry.org/bbs/format" + proto "github.com/gogo/protobuf/proto" +) + +const ( + ActionTypeDownload = "download" + 
ActionTypeEmitProgress = "emit_progress" + ActionTypeRun = "run" + ActionTypeUpload = "upload" + ActionTypeTimeout = "timeout" + ActionTypeTry = "try" + ActionTypeParallel = "parallel" + ActionTypeSerial = "serial" + ActionTypeCodependent = "codependent" +) + +var ErrInvalidActionType = errors.New("invalid action type") + +type ActionInterface interface { + ActionType() string + Validate() error + proto.Message +} + +func (a *Action) GetValue() interface{} { + if a.DownloadAction != nil { + return a.DownloadAction + } + if a.UploadAction != nil { + return a.UploadAction + } + if a.RunAction != nil { + return a.RunAction + } + if a.TimeoutAction != nil { + return a.TimeoutAction + } + if a.EmitProgressAction != nil { + return a.EmitProgressAction + } + if a.TryAction != nil { + return a.TryAction + } + if a.ParallelAction != nil { + return a.ParallelAction + } + if a.SerialAction != nil { + return a.SerialAction + } + if a.CodependentAction != nil { + return a.CodependentAction + } + return nil +} + +func (a *Action) SetValue(value interface{}) bool { + switch vt := value.(type) { + case *DownloadAction: + a.DownloadAction = vt + case *UploadAction: + a.UploadAction = vt + case *RunAction: + a.RunAction = vt + case *TimeoutAction: + a.TimeoutAction = vt + case *EmitProgressAction: + a.EmitProgressAction = vt + case *TryAction: + a.TryAction = vt + case *ParallelAction: + a.ParallelAction = vt + case *SerialAction: + a.SerialAction = vt + case *CodependentAction: + a.CodependentAction = vt + default: + return false + } + return true +} + +func (a *Action) Validate() error { + if a == nil { + return nil + } + + if inner := UnwrapAction(a); inner != nil { + err := inner.Validate() + if err != nil { + return err + } + } else { + return ErrInvalidField{"inner-action"} + } + return nil +} + +func (a *DownloadAction) ActionType() string { + return ActionTypeDownload +} + +func (a DownloadAction) Validate() error { + var validationError ValidationError + + if a.GetFrom() == "" { + validationError = validationError.Append(ErrInvalidField{"from"}) + } + + if a.GetTo() == "" { + validationError = validationError.Append(ErrInvalidField{"to"}) + } + + if a.GetUser() == "" { + validationError = validationError.Append(ErrInvalidField{"user"}) + } + + if a.GetChecksumValue() != "" && a.GetChecksumAlgorithm() == "" { + validationError = validationError.Append(ErrInvalidField{"checksum algorithm"}) + } + + if a.GetChecksumValue() == "" && a.GetChecksumAlgorithm() != "" { + validationError = validationError.Append(ErrInvalidField{"checksum value"}) + } + + if a.GetChecksumValue() != "" && a.GetChecksumAlgorithm() != "" { + if !contains([]string{"md5", "sha1", "sha256"}, strings.ToLower(a.GetChecksumAlgorithm())) { + validationError = validationError.Append(ErrInvalidField{"invalid algorithm"}) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func contains(array []string, element string) bool { + for _, item := range array { + if item == element { + return true + } + } + return false +} + +func (a *UploadAction) ActionType() string { + return ActionTypeUpload +} + +func (a UploadAction) Validate() error { + var validationError ValidationError + + if a.GetTo() == "" { + validationError = validationError.Append(ErrInvalidField{"to"}) + } + + if a.GetFrom() == "" { + validationError = validationError.Append(ErrInvalidField{"from"}) + } + + if a.GetUser() == "" { + validationError = validationError.Append(ErrInvalidField{"user"}) + } + + if !validationError.Empty() { + 
return validationError + } + + return nil +} + +func (a *RunAction) ActionType() string { + return ActionTypeRun +} + +func (a RunAction) Validate() error { + var validationError ValidationError + + if a.Path == "" { + validationError = validationError.Append(ErrInvalidField{"path"}) + } + + if a.User == "" { + validationError = validationError.Append(ErrInvalidField{"user"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *TimeoutAction) ActionType() string { + return ActionTypeTimeout +} + +func (a TimeoutAction) Validate() error { + var validationError ValidationError + + if a.Action == nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + } else { + err := UnwrapAction(a.Action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if a.GetTimeoutMs() <= 0 { + validationError = validationError.Append(ErrInvalidField{"timeout_ms"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *TryAction) ActionType() string { + return ActionTypeTry +} + +func (a TryAction) Validate() error { + var validationError ValidationError + + if a.Action == nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + } else { + err := UnwrapAction(a.Action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (*ParallelAction) Version() format.Version { + return format.V0 +} + +func (a *ParallelAction) ActionType() string { + return ActionTypeParallel +} + +func (a ParallelAction) Validate() error { + var validationError ValidationError + + if a.Actions == nil || len(a.Actions) == 0 { + validationError = validationError.Append(ErrInvalidField{"actions"}) + } else { + for index, action := range a.Actions { + if action == nil { + errorString := fmt.Sprintf("action at index %d", index) + validationError = validationError.Append(ErrInvalidField{errorString}) + } else { + err := UnwrapAction(action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *CodependentAction) ActionType() string { + return ActionTypeCodependent +} + +func (a CodependentAction) Validate() error { + var validationError ValidationError + + if a.Actions == nil || len(a.Actions) == 0 { + validationError = validationError.Append(ErrInvalidField{"actions"}) + } else { + for index, action := range a.Actions { + if action == nil { + errorString := fmt.Sprintf("action at index %d", index) + validationError = validationError.Append(ErrInvalidField{errorString}) + } else { + err := UnwrapAction(action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +// func (*SerialAction) Version() format.Version { +// return format.V0 +// } + +// func (*SerialAction) MigrateFromVersion(v format.Version) error { +// return nil +// } + +func (a *SerialAction) ActionType() string { + return ActionTypeSerial +} + +func (a SerialAction) Validate() error { + var validationError ValidationError + + if a.Actions == nil || len(a.Actions) == 0 { + validationError = validationError.Append(ErrInvalidField{"actions"}) + } else { + for index, action := range a.Actions { + if action == nil { + errorString := fmt.Sprintf("action at 
index %d", index) + validationError = validationError.Append(ErrInvalidField{errorString}) + } else { + err := UnwrapAction(action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (a *EmitProgressAction) ActionType() string { + return ActionTypeEmitProgress +} + +func (a EmitProgressAction) Validate() error { + var validationError ValidationError + + if a.Action == nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + } else { + err := UnwrapAction(a.Action).Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func EmitProgressFor(action ActionInterface, startMessage string, successMessage string, failureMessagePrefix string) *EmitProgressAction { + return &EmitProgressAction{ + Action: WrapAction(action), + StartMessage: startMessage, + SuccessMessage: successMessage, + FailureMessagePrefix: failureMessagePrefix, + } +} + +func Timeout(action ActionInterface, timeout time.Duration) *TimeoutAction { + return &TimeoutAction{ + Action: WrapAction(action), + TimeoutMs: (int64)(timeout / 1000000), + } +} + +func Try(action ActionInterface) *TryAction { + return &TryAction{Action: WrapAction(action)} +} + +func Parallel(actions ...ActionInterface) *ParallelAction { + return &ParallelAction{Actions: WrapActions(actions)} +} + +func Codependent(actions ...ActionInterface) *CodependentAction { + return &CodependentAction{Actions: WrapActions(actions)} +} + +func Serial(actions ...ActionInterface) *SerialAction { + return &SerialAction{Actions: WrapActions(actions)} +} + +func UnwrapAction(action *Action) ActionInterface { + if action == nil { + return nil + } + a := action.GetValue() + if a == nil { + return nil + } + return a.(ActionInterface) +} + +func WrapActions(actions []ActionInterface) []*Action { + wrappedActions := make([]*Action, 0, len(actions)) + for _, action := range actions { + wrappedActions = append(wrappedActions, WrapAction(action)) + } + return wrappedActions +} + +func WrapAction(action ActionInterface) *Action { + if action == nil { + return nil + } + a := &Action{} + a.SetValue(action) + return a +} + +// SetDeprecatedTimeoutNs returns a deep copy of the Action tree. If there are +// any TimeoutActions in the tree, their DeprecatedStartTimeoutS is set to +// `TimeoutMs * time.Millisecond'. 
+func (action *Action) SetDeprecatedTimeoutNs() *Action { + if action == nil { + return nil + } + + a := action.GetValue() + switch actionModel := a.(type) { + case *RunAction, *DownloadAction, *UploadAction: + return action + + case *TimeoutAction: + timeoutAction := *actionModel + timeoutAction.DeprecatedTimeoutNs = timeoutAction.TimeoutMs * int64(time.Millisecond) + return WrapAction(&timeoutAction) + + case *EmitProgressAction: + return actionModel.Action.SetDeprecatedTimeoutNs() + + case *TryAction: + return actionModel.Action.SetDeprecatedTimeoutNs() + + case *ParallelAction: + newActions := []*Action{} + for _, subaction := range actionModel.Actions { + newActions = append(newActions, subaction.SetDeprecatedTimeoutNs()) + } + parallelAction := *actionModel + parallelAction.Actions = newActions + return WrapAction(&parallelAction) + + case *SerialAction: + newActions := []*Action{} + for _, subaction := range actionModel.Actions { + newActions = append(newActions, subaction.SetDeprecatedTimeoutNs()) + } + serialAction := *actionModel + serialAction.Actions = newActions + return WrapAction(&serialAction) + + case *CodependentAction: + newActions := []*Action{} + for _, subaction := range actionModel.Actions { + newActions = append(newActions, subaction.SetDeprecatedTimeoutNs()) + } + codependentAction := *actionModel + codependentAction.Actions = newActions + return WrapAction(&codependentAction) + } + + return action +} + +func (action *Action) SetTimeoutMsFromDeprecatedTimeoutNs() { + if action == nil { + return + } + + a := action.GetValue() + switch actionModel := a.(type) { + case *RunAction, *DownloadAction, *UploadAction: + return + + case *TimeoutAction: + timeoutAction := actionModel + timeoutAction.TimeoutMs = timeoutAction.DeprecatedTimeoutNs / int64(time.Millisecond) + + case *EmitProgressAction: + actionModel.Action.SetDeprecatedTimeoutNs() + + case *TryAction: + actionModel.Action.SetDeprecatedTimeoutNs() + + case *ParallelAction: + for _, subaction := range actionModel.Actions { + subaction.SetDeprecatedTimeoutNs() + } + + case *SerialAction: + for _, subaction := range actionModel.Actions { + subaction.SetDeprecatedTimeoutNs() + } + + case *CodependentAction: + for _, subaction := range actionModel.Actions { + subaction.SetDeprecatedTimeoutNs() + } + } +} + +type internalResourceLimits struct { + Nofile *uint64 `json:"nofile,omitempty"` + Nproc *uint64 `json:"nproc,omitempty"` +} + +func (l *ResourceLimits) UnmarshalJSON(data []byte) error { + var limit internalResourceLimits + if err := json.Unmarshal(data, &limit); err != nil { + return err + } + + if limit.Nofile != nil { + l.SetNofile(*limit.Nofile) + } + if limit.Nproc != nil { + l.SetNproc(*limit.Nproc) + } + + return nil +} + +func (l ResourceLimits) MarshalJSON() ([]byte, error) { + var limit internalResourceLimits + if l.NofileExists() { + n := l.GetNofile() + limit.Nofile = &n + } + if l.NprocExists() { + n := l.GetNproc() + limit.Nproc = &n + } + return json.Marshal(limit) +} + +func (l *ResourceLimits) SetNofile(nofile uint64) { + l.OptionalNofile = &ResourceLimits_Nofile{ + Nofile: nofile, + } +} + +func (m *ResourceLimits) GetNofilePtr() *uint64 { + if x, ok := m.GetOptionalNofile().(*ResourceLimits_Nofile); ok { + return &x.Nofile + } + return nil +} + +func (l *ResourceLimits) NofileExists() bool { + _, ok := l.GetOptionalNofile().(*ResourceLimits_Nofile) + return ok +} + +func (l *ResourceLimits) SetNproc(nproc uint64) { + l.OptionalNproc = &ResourceLimits_Nproc{ + Nproc: nproc, + } +} + +func (l 
*ResourceLimits) NprocExists() bool { + _, ok := l.GetOptionalNproc().(*ResourceLimits_Nproc) + return ok +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actions.pb.go b/vendor/code.cloudfoundry.org/bbs/models/actions.pb.go new file mode 100644 index 00000000..9d98e76c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actions.pb.go @@ -0,0 +1,5076 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: actions.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Action struct { + // Note: we only expect one of the following set of fields to be + // set. Previously we used `option (gogoproto.onlyone) = true' but since this + // is now deprecated and oneof introduces a lot of structural changes, we + // deferred on switching to oneof for now until there is a good reason for it. + // disadvantages of using multiple optionals as opposed to oneof are: + // - less memory usage + // disadvantages of using multiple optionals without onlyone: + // - writing our own GetAction/SetAction methods + // action oneof { + DownloadAction *DownloadAction `protobuf:"bytes,1,opt,name=download_action,json=downloadAction,proto3" json:"download,omitempty"` + UploadAction *UploadAction `protobuf:"bytes,2,opt,name=upload_action,json=uploadAction,proto3" json:"upload,omitempty"` + RunAction *RunAction `protobuf:"bytes,3,opt,name=run_action,json=runAction,proto3" json:"run,omitempty"` + TimeoutAction *TimeoutAction `protobuf:"bytes,4,opt,name=timeout_action,json=timeoutAction,proto3" json:"timeout,omitempty"` + EmitProgressAction *EmitProgressAction `protobuf:"bytes,5,opt,name=emit_progress_action,json=emitProgressAction,proto3" json:"emit_progress,omitempty"` + TryAction *TryAction `protobuf:"bytes,6,opt,name=try_action,json=tryAction,proto3" json:"try,omitempty"` + ParallelAction *ParallelAction `protobuf:"bytes,7,opt,name=parallel_action,json=parallelAction,proto3" json:"parallel,omitempty"` + SerialAction *SerialAction `protobuf:"bytes,8,opt,name=serial_action,json=serialAction,proto3" json:"serial,omitempty"` + CodependentAction *CodependentAction `protobuf:"bytes,9,opt,name=codependent_action,json=codependentAction,proto3" json:"codependent,omitempty"` +} + +func (m *Action) Reset() { *m = Action{} } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Action.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_Action.Merge(m, src) +} +func (m *Action) XXX_Size() int { + return m.Size() +} +func (m 
*Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +func (m *Action) GetDownloadAction() *DownloadAction { + if m != nil { + return m.DownloadAction + } + return nil +} + +func (m *Action) GetUploadAction() *UploadAction { + if m != nil { + return m.UploadAction + } + return nil +} + +func (m *Action) GetRunAction() *RunAction { + if m != nil { + return m.RunAction + } + return nil +} + +func (m *Action) GetTimeoutAction() *TimeoutAction { + if m != nil { + return m.TimeoutAction + } + return nil +} + +func (m *Action) GetEmitProgressAction() *EmitProgressAction { + if m != nil { + return m.EmitProgressAction + } + return nil +} + +func (m *Action) GetTryAction() *TryAction { + if m != nil { + return m.TryAction + } + return nil +} + +func (m *Action) GetParallelAction() *ParallelAction { + if m != nil { + return m.ParallelAction + } + return nil +} + +func (m *Action) GetSerialAction() *SerialAction { + if m != nil { + return m.SerialAction + } + return nil +} + +func (m *Action) GetCodependentAction() *CodependentAction { + if m != nil { + return m.CodependentAction + } + return nil +} + +type DownloadAction struct { + Artifact string `protobuf:"bytes,1,opt,name=artifact,proto3" json:"artifact,omitempty"` + From string `protobuf:"bytes,2,opt,name=from,proto3" json:"from"` + To string `protobuf:"bytes,3,opt,name=to,proto3" json:"to"` + CacheKey string `protobuf:"bytes,4,opt,name=cache_key,json=cacheKey,proto3" json:"cache_key"` + LogSource string `protobuf:"bytes,5,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user"` + ChecksumAlgorithm string `protobuf:"bytes,7,opt,name=checksum_algorithm,json=checksumAlgorithm,proto3" json:"checksum_algorithm,omitempty"` + ChecksumValue string `protobuf:"bytes,8,opt,name=checksum_value,json=checksumValue,proto3" json:"checksum_value,omitempty"` +} + +func (m *DownloadAction) Reset() { *m = DownloadAction{} } +func (*DownloadAction) ProtoMessage() {} +func (*DownloadAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{1} +} +func (m *DownloadAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownloadAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DownloadAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DownloadAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownloadAction.Merge(m, src) +} +func (m *DownloadAction) XXX_Size() int { + return m.Size() +} +func (m *DownloadAction) XXX_DiscardUnknown() { + xxx_messageInfo_DownloadAction.DiscardUnknown(m) +} + +var xxx_messageInfo_DownloadAction proto.InternalMessageInfo + +func (m *DownloadAction) GetArtifact() string { + if m != nil { + return m.Artifact + } + return "" +} + +func (m *DownloadAction) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *DownloadAction) GetTo() string { + if m != nil { + return m.To + } + return "" +} + +func (m *DownloadAction) GetCacheKey() string { + if m != nil { + return m.CacheKey + } + return "" +} + +func (m *DownloadAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *DownloadAction) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m 
*DownloadAction) GetChecksumAlgorithm() string { + if m != nil { + return m.ChecksumAlgorithm + } + return "" +} + +func (m *DownloadAction) GetChecksumValue() string { + if m != nil { + return m.ChecksumValue + } + return "" +} + +type UploadAction struct { + Artifact string `protobuf:"bytes,1,opt,name=artifact,proto3" json:"artifact,omitempty"` + From string `protobuf:"bytes,2,opt,name=from,proto3" json:"from"` + To string `protobuf:"bytes,3,opt,name=to,proto3" json:"to"` + LogSource string `protobuf:"bytes,4,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + User string `protobuf:"bytes,5,opt,name=user,proto3" json:"user"` +} + +func (m *UploadAction) Reset() { *m = UploadAction{} } +func (*UploadAction) ProtoMessage() {} +func (*UploadAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{2} +} +func (m *UploadAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UploadAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UploadAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UploadAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_UploadAction.Merge(m, src) +} +func (m *UploadAction) XXX_Size() int { + return m.Size() +} +func (m *UploadAction) XXX_DiscardUnknown() { + xxx_messageInfo_UploadAction.DiscardUnknown(m) +} + +var xxx_messageInfo_UploadAction proto.InternalMessageInfo + +func (m *UploadAction) GetArtifact() string { + if m != nil { + return m.Artifact + } + return "" +} + +func (m *UploadAction) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *UploadAction) GetTo() string { + if m != nil { + return m.To + } + return "" +} + +func (m *UploadAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *UploadAction) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +type RunAction struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path"` + Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` + Dir string `protobuf:"bytes,3,opt,name=dir,proto3" json:"dir,omitempty"` + Env []*EnvironmentVariable `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty"` + ResourceLimits *ResourceLimits `protobuf:"bytes,5,opt,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"` + User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user"` + LogSource string `protobuf:"bytes,7,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + SuppressLogOutput bool `protobuf:"varint,8,opt,name=suppress_log_output,json=suppressLogOutput,proto3" json:"suppress_log_output"` +} + +func (m *RunAction) Reset() { *m = RunAction{} } +func (*RunAction) ProtoMessage() {} +func (*RunAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{3} +} +func (m *RunAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RunAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RunAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAction.Merge(m, src) +} +func (m 
*RunAction) XXX_Size() int { + return m.Size() +} +func (m *RunAction) XXX_DiscardUnknown() { + xxx_messageInfo_RunAction.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAction proto.InternalMessageInfo + +func (m *RunAction) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *RunAction) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *RunAction) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func (m *RunAction) GetEnv() []*EnvironmentVariable { + if m != nil { + return m.Env + } + return nil +} + +func (m *RunAction) GetResourceLimits() *ResourceLimits { + if m != nil { + return m.ResourceLimits + } + return nil +} + +func (m *RunAction) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *RunAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *RunAction) GetSuppressLogOutput() bool { + if m != nil { + return m.SuppressLogOutput + } + return false +} + +type TimeoutAction struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + DeprecatedTimeoutNs int64 `protobuf:"varint,2,opt,name=deprecated_timeout_ns,json=deprecatedTimeoutNs,proto3" json:"timeout,omitempty"` // Deprecated: Do not use. + LogSource string `protobuf:"bytes,3,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` + TimeoutMs int64 `protobuf:"varint,4,opt,name=timeout_ms,json=timeoutMs,proto3" json:"timeout_ms"` +} + +func (m *TimeoutAction) Reset() { *m = TimeoutAction{} } +func (*TimeoutAction) ProtoMessage() {} +func (*TimeoutAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{4} +} +func (m *TimeoutAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeoutAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeoutAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeoutAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeoutAction.Merge(m, src) +} +func (m *TimeoutAction) XXX_Size() int { + return m.Size() +} +func (m *TimeoutAction) XXX_DiscardUnknown() { + xxx_messageInfo_TimeoutAction.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeoutAction proto.InternalMessageInfo + +func (m *TimeoutAction) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +// Deprecated: Do not use. 
+func (m *TimeoutAction) GetDeprecatedTimeoutNs() int64 { + if m != nil { + return m.DeprecatedTimeoutNs + } + return 0 +} + +func (m *TimeoutAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *TimeoutAction) GetTimeoutMs() int64 { + if m != nil { + return m.TimeoutMs + } + return 0 +} + +type EmitProgressAction struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + StartMessage string `protobuf:"bytes,2,opt,name=start_message,json=startMessage,proto3" json:"start_message"` + SuccessMessage string `protobuf:"bytes,3,opt,name=success_message,json=successMessage,proto3" json:"success_message"` + FailureMessagePrefix string `protobuf:"bytes,4,opt,name=failure_message_prefix,json=failureMessagePrefix,proto3" json:"failure_message_prefix"` + LogSource string `protobuf:"bytes,5,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *EmitProgressAction) Reset() { *m = EmitProgressAction{} } +func (*EmitProgressAction) ProtoMessage() {} +func (*EmitProgressAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{5} +} +func (m *EmitProgressAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmitProgressAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EmitProgressAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EmitProgressAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmitProgressAction.Merge(m, src) +} +func (m *EmitProgressAction) XXX_Size() int { + return m.Size() +} +func (m *EmitProgressAction) XXX_DiscardUnknown() { + xxx_messageInfo_EmitProgressAction.DiscardUnknown(m) +} + +var xxx_messageInfo_EmitProgressAction proto.InternalMessageInfo + +func (m *EmitProgressAction) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *EmitProgressAction) GetStartMessage() string { + if m != nil { + return m.StartMessage + } + return "" +} + +func (m *EmitProgressAction) GetSuccessMessage() string { + if m != nil { + return m.SuccessMessage + } + return "" +} + +func (m *EmitProgressAction) GetFailureMessagePrefix() string { + if m != nil { + return m.FailureMessagePrefix + } + return "" +} + +func (m *EmitProgressAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type TryAction struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *TryAction) Reset() { *m = TryAction{} } +func (*TryAction) ProtoMessage() {} +func (*TryAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{6} +} +func (m *TryAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TryAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TryAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TryAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TryAction.Merge(m, src) +} +func (m *TryAction) XXX_Size() int { + return m.Size() +} +func (m *TryAction) XXX_DiscardUnknown() { + 
xxx_messageInfo_TryAction.DiscardUnknown(m) +} + +var xxx_messageInfo_TryAction proto.InternalMessageInfo + +func (m *TryAction) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *TryAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type ParallelAction struct { + Actions []*Action `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *ParallelAction) Reset() { *m = ParallelAction{} } +func (*ParallelAction) ProtoMessage() {} +func (*ParallelAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{7} +} +func (m *ParallelAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParallelAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ParallelAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ParallelAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParallelAction.Merge(m, src) +} +func (m *ParallelAction) XXX_Size() int { + return m.Size() +} +func (m *ParallelAction) XXX_DiscardUnknown() { + xxx_messageInfo_ParallelAction.DiscardUnknown(m) +} + +var xxx_messageInfo_ParallelAction proto.InternalMessageInfo + +func (m *ParallelAction) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *ParallelAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type SerialAction struct { + Actions []*Action `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *SerialAction) Reset() { *m = SerialAction{} } +func (*SerialAction) ProtoMessage() {} +func (*SerialAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{8} +} +func (m *SerialAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerialAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SerialAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SerialAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerialAction.Merge(m, src) +} +func (m *SerialAction) XXX_Size() int { + return m.Size() +} +func (m *SerialAction) XXX_DiscardUnknown() { + xxx_messageInfo_SerialAction.DiscardUnknown(m) +} + +var xxx_messageInfo_SerialAction proto.InternalMessageInfo + +func (m *SerialAction) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *SerialAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type CodependentAction struct { + Actions []*Action `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source,omitempty"` +} + +func (m *CodependentAction) Reset() { *m = CodependentAction{} } +func (*CodependentAction) ProtoMessage() {} +func (*CodependentAction) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{9} +} +func (m 
*CodependentAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CodependentAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CodependentAction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CodependentAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodependentAction.Merge(m, src) +} +func (m *CodependentAction) XXX_Size() int { + return m.Size() +} +func (m *CodependentAction) XXX_DiscardUnknown() { + xxx_messageInfo_CodependentAction.DiscardUnknown(m) +} + +var xxx_messageInfo_CodependentAction proto.InternalMessageInfo + +func (m *CodependentAction) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *CodependentAction) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +type ResourceLimits struct { + // Types that are valid to be assigned to OptionalNofile: + // + // *ResourceLimits_Nofile + OptionalNofile isResourceLimits_OptionalNofile `protobuf_oneof:"optional_nofile"` + // Types that are valid to be assigned to OptionalNproc: + // + // *ResourceLimits_Nproc + OptionalNproc isResourceLimits_OptionalNproc `protobuf_oneof:"optional_nproc"` +} + +func (m *ResourceLimits) Reset() { *m = ResourceLimits{} } +func (*ResourceLimits) ProtoMessage() {} +func (*ResourceLimits) Descriptor() ([]byte, []int) { + return fileDescriptor_eeb49063df94c2b8, []int{10} +} +func (m *ResourceLimits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceLimits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceLimits.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceLimits) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceLimits.Merge(m, src) +} +func (m *ResourceLimits) XXX_Size() int { + return m.Size() +} +func (m *ResourceLimits) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceLimits.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceLimits proto.InternalMessageInfo + +type isResourceLimits_OptionalNofile interface { + isResourceLimits_OptionalNofile() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} +type isResourceLimits_OptionalNproc interface { + isResourceLimits_OptionalNproc() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ResourceLimits_Nofile struct { + Nofile uint64 `protobuf:"varint,1,opt,name=nofile,proto3,oneof" json:"nofile,omitempty"` +} +type ResourceLimits_Nproc struct { + Nproc uint64 `protobuf:"varint,2,opt,name=nproc,proto3,oneof" json:"nproc,omitempty"` +} + +func (*ResourceLimits_Nofile) isResourceLimits_OptionalNofile() {} +func (*ResourceLimits_Nproc) isResourceLimits_OptionalNproc() {} + +func (m *ResourceLimits) GetOptionalNofile() isResourceLimits_OptionalNofile { + if m != nil { + return m.OptionalNofile + } + return nil +} +func (m *ResourceLimits) GetOptionalNproc() isResourceLimits_OptionalNproc { + if m != nil { + return m.OptionalNproc + } + return nil +} + +func (m *ResourceLimits) GetNofile() uint64 { + if x, ok := m.GetOptionalNofile().(*ResourceLimits_Nofile); ok { + return x.Nofile + } + return 0 +} + +// Deprecated: Do not use. 
+func (m *ResourceLimits) GetNproc() uint64 { + if x, ok := m.GetOptionalNproc().(*ResourceLimits_Nproc); ok { + return x.Nproc + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ResourceLimits) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ResourceLimits_Nofile)(nil), + (*ResourceLimits_Nproc)(nil), + } +} + +func init() { + proto.RegisterType((*Action)(nil), "models.Action") + proto.RegisterType((*DownloadAction)(nil), "models.DownloadAction") + proto.RegisterType((*UploadAction)(nil), "models.UploadAction") + proto.RegisterType((*RunAction)(nil), "models.RunAction") + proto.RegisterType((*TimeoutAction)(nil), "models.TimeoutAction") + proto.RegisterType((*EmitProgressAction)(nil), "models.EmitProgressAction") + proto.RegisterType((*TryAction)(nil), "models.TryAction") + proto.RegisterType((*ParallelAction)(nil), "models.ParallelAction") + proto.RegisterType((*SerialAction)(nil), "models.SerialAction") + proto.RegisterType((*CodependentAction)(nil), "models.CodependentAction") + proto.RegisterType((*ResourceLimits)(nil), "models.ResourceLimits") +} + +func init() { proto.RegisterFile("actions.proto", fileDescriptor_eeb49063df94c2b8) } + +var fileDescriptor_eeb49063df94c2b8 = []byte{ + // 1073 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x5f, 0xef, 0x6e, 0x36, 0xf1, 0x6b, 0xd6, 0xc9, 0x4e, 0xfe, 0x74, 0xbb, 0x01, 0x3b, 0x5a, + 0x09, 0x14, 0x21, 0x92, 0x4a, 0x05, 0x71, 0x42, 0x42, 0xdd, 0x82, 0xa8, 0xd4, 0x96, 0x46, 0x93, + 0xd2, 0x0a, 0x09, 0xc9, 0x72, 0xbc, 0xb3, 0x8e, 0x15, 0xdb, 0x63, 0x8d, 0xc7, 0x81, 0xbd, 0x71, + 0xe7, 0xc2, 0x17, 0xe0, 0x80, 0xb8, 0xf0, 0x45, 0x90, 0x38, 0x46, 0x9c, 0x7a, 0xb2, 0xc8, 0xe6, + 0x82, 0x7c, 0xea, 0x47, 0x40, 0x1e, 0xcf, 0x6c, 0xec, 0x4d, 0xab, 0xf4, 0x50, 0x2e, 0xd6, 0xbc, + 0xf7, 0x7e, 0xef, 0xf7, 0x66, 0xde, 0x7b, 0xf3, 0xc6, 0xd0, 0x75, 0x5c, 0xee, 0xd3, 0x28, 0x39, + 0x88, 0x19, 0xe5, 0x14, 0x75, 0x42, 0x3a, 0x26, 0x41, 0x32, 0xd8, 0xf7, 0x7c, 0x7e, 0x92, 0x1e, + 0x1f, 0xb8, 0x34, 0xbc, 0xeb, 0x51, 0x8f, 0xde, 0x15, 0xe6, 0xe3, 0x74, 0x22, 0x24, 0x21, 0x88, + 0x55, 0xe9, 0x36, 0xd8, 0x21, 0xd1, 0x99, 0xcf, 0x68, 0x14, 0x92, 0x88, 0xdb, 0x67, 0x0e, 0xf3, + 0x9d, 0xe3, 0x80, 0x48, 0xce, 0xe1, 0xcf, 0x1d, 0xe8, 0xdc, 0x17, 0x51, 0xd0, 0x0b, 0x58, 0x1b, + 0xd3, 0x1f, 0xa2, 0x80, 0x3a, 0x63, 0xbb, 0x0c, 0xdc, 0xd7, 0x76, 0xb5, 0xbd, 0x5b, 0xf7, 0xb6, + 0x0f, 0xca, 0xc0, 0x07, 0x5f, 0x4a, 0x73, 0xe9, 0x30, 0xda, 0xce, 0x33, 0x0b, 0x29, 0x97, 0x8f, + 0x69, 0xe8, 0x73, 0x12, 0xc6, 0x7c, 0x8a, 0x8d, 0x71, 0x0d, 0x87, 0x9e, 0x42, 0x37, 0x8d, 0xab, + 0xb4, 0x4d, 0x41, 0xbb, 0xa9, 0x68, 0xbf, 0x8d, 0x2b, 0xa4, 0x9b, 0x79, 0x66, 0xad, 0x97, 0xf0, + 0x0a, 0xe5, 0x6a, 0x5a, 0xc1, 0xa0, 0x07, 0x00, 0x2c, 0x8d, 0x14, 0x5b, 0x4b, 0xb0, 0xf5, 0x14, + 0x1b, 0x4e, 0x23, 0x49, 0xd5, 0xcb, 0x33, 0xab, 0xcb, 0xd2, 0xa8, 0xc2, 0xa3, 0x33, 0x65, 0x45, + 0x47, 0x60, 0x70, 0x3f, 0x24, 0x34, 0xe5, 0x8a, 0xa8, 0x2d, 0x88, 0xb6, 0x14, 0xd1, 0xb3, 0xd2, + 0x2a, 0xc9, 0xb6, 0xf2, 0xcc, 0xea, 0x49, 0x87, 0x0a, 0x61, 0x97, 0x57, 0x51, 0xc8, 0x87, 0x4d, + 0x12, 0xfa, 0xdc, 0x8e, 0x19, 0xf5, 0x18, 0x49, 0x12, 0x45, 0xbd, 0x24, 0xa8, 0x07, 0x8a, 0xfa, + 0xab, 0xd0, 0xe7, 0x87, 0x12, 0x22, 0xf9, 0x77, 0xf2, 0xcc, 0xba, 0x5d, 0xf3, 0xad, 0x44, 0x41, + 0xe4, 0x9a, 0x43, 0x91, 0x04, 0xce, 0xa6, 0x2a, 0x40, 0xa7, 0x9e, 0x84, 0x67, 0x6c, 0x5a, 0x4d, + 0x02, 0x67, 0xd3, 0x6a, 0x12, 0xb8, 0xb2, 0x16, 0x35, 0x8f, 0x1d, 0xe6, 0x04, 
0x01, 0x09, 0x14, + 0xd3, 0x72, 0xbd, 0xe6, 0x87, 0xd2, 0x5c, 0xad, 0xb9, 0x72, 0xa9, 0xd6, 0x3c, 0xae, 0xe1, 0x8a, + 0x9a, 0x27, 0x84, 0xf9, 0xce, 0x9c, 0x76, 0xa5, 0x5e, 0xf3, 0x23, 0x61, 0xac, 0xd6, 0xbc, 0x84, + 0x57, 0x6b, 0x9e, 0x54, 0x30, 0xc8, 0x05, 0xe4, 0xd2, 0x31, 0x89, 0x49, 0x34, 0x2e, 0xfa, 0x58, + 0xb2, 0xea, 0x82, 0xf5, 0x8e, 0x62, 0x7d, 0x70, 0x85, 0x90, 0xd4, 0x77, 0xf2, 0xcc, 0xda, 0xaa, + 0x38, 0x56, 0xf8, 0x7b, 0xee, 0x22, 0x7a, 0xf8, 0x5b, 0x13, 0x8c, 0x7a, 0x93, 0xa3, 0x01, 0xac, + 0x38, 0x8c, 0xfb, 0x13, 0xc7, 0xe5, 0xe2, 0x3a, 0xe8, 0x78, 0x2e, 0xa3, 0xf7, 0xa0, 0x3d, 0x61, + 0x34, 0x14, 0xfd, 0xac, 0x8f, 0x56, 0xf2, 0xcc, 0x12, 0x32, 0x16, 0x5f, 0xb4, 0x0d, 0x4d, 0x4e, + 0x45, 0x77, 0xea, 0xa3, 0x4e, 0x9e, 0x59, 0x4d, 0x4e, 0x71, 0x93, 0x53, 0xf4, 0x11, 0xe8, 0xae, + 0xe3, 0x9e, 0x10, 0xfb, 0x94, 0x4c, 0x45, 0xcf, 0xe9, 0xa3, 0x6e, 0x9e, 0x59, 0x57, 0x4a, 0xbc, + 0x22, 0x96, 0x8f, 0xc8, 0x14, 0xbd, 0x0f, 0x10, 0x50, 0xcf, 0x4e, 0x68, 0xca, 0x5c, 0x22, 0xba, + 0x48, 0xc7, 0x7a, 0x40, 0xbd, 0x23, 0xa1, 0x28, 0x36, 0x90, 0x26, 0x84, 0x89, 0xea, 0xcb, 0x0d, + 0x14, 0x32, 0x16, 0x5f, 0xb4, 0x0f, 0xc8, 0x3d, 0x21, 0xee, 0x69, 0x92, 0x86, 0xb6, 0x13, 0x78, + 0x94, 0xf9, 0xfc, 0x24, 0x14, 0xf5, 0xd5, 0x71, 0x4f, 0x59, 0xee, 0x2b, 0x03, 0xfa, 0x00, 0x8c, + 0x39, 0xfc, 0xcc, 0x09, 0x52, 0x22, 0x6a, 0xa6, 0xe3, 0xae, 0xd2, 0x3e, 0x2f, 0x94, 0xc3, 0x5f, + 0x35, 0x58, 0xad, 0xde, 0xd8, 0xff, 0x21, 0x43, 0xf5, 0x53, 0xb7, 0xdf, 0x74, 0xea, 0xa5, 0xd7, + 0x9d, 0x7a, 0xf8, 0x67, 0x13, 0xf4, 0xf9, 0x0c, 0x28, 0xb0, 0xb1, 0xc3, 0x4f, 0xca, 0x8d, 0x95, + 0xd8, 0x42, 0xc6, 0xe2, 0x8b, 0x10, 0xb4, 0x1d, 0xe6, 0x25, 0xfd, 0xe6, 0x6e, 0x6b, 0x4f, 0xc7, + 0x62, 0x8d, 0xd6, 0xa1, 0x35, 0xf6, 0x59, 0xb9, 0x2b, 0x5c, 0x2c, 0xd1, 0x3e, 0xb4, 0x48, 0x74, + 0xd6, 0x6f, 0xef, 0xb6, 0xf6, 0x6e, 0xdd, 0xdb, 0x99, 0xdf, 0xe1, 0xab, 0xa9, 0xfa, 0x5c, 0x0e, + 0x55, 0x5c, 0xe0, 0xd0, 0x17, 0xb0, 0xc6, 0x48, 0xb9, 0x77, 0x3b, 0xf0, 0x43, 0x9f, 0x27, 0xf2, + 0xfa, 0xcf, 0xef, 0x14, 0x96, 0xe6, 0xc7, 0xc2, 0x8a, 0x0d, 0x56, 0x93, 0x6f, 0xa8, 0x6a, 0x3d, + 0x39, 0xcb, 0x8b, 0xc9, 0xf9, 0x1a, 0x36, 0x92, 0x34, 0x8e, 0xc5, 0xf0, 0x29, 0x70, 0x34, 0xe5, + 0x71, 0xca, 0x45, 0x29, 0x57, 0x46, 0xb7, 0xf3, 0xcc, 0x7a, 0x9d, 0x19, 0xf7, 0x94, 0xf2, 0x31, + 0xf5, 0x9e, 0x0a, 0xd5, 0xf0, 0x6f, 0x0d, 0xba, 0xb5, 0x11, 0x88, 0x3e, 0x84, 0x4e, 0xed, 0x5d, + 0x30, 0xd4, 0x79, 0x4a, 0x3b, 0x96, 0x56, 0xf4, 0x08, 0xb6, 0xc6, 0x24, 0x66, 0xc4, 0x75, 0x38, + 0x19, 0xdb, 0x6a, 0xc8, 0x46, 0x89, 0xe8, 0x82, 0x96, 0xd8, 0xc4, 0xf5, 0x49, 0xda, 0xd7, 0xf0, + 0xc6, 0x95, 0x97, 0x0c, 0xfc, 0x4d, 0xb2, 0x70, 0xdc, 0xd6, 0xe2, 0x71, 0xf7, 0x01, 0x54, 0x80, + 0x30, 0x11, 0xad, 0xd2, 0x1a, 0x19, 0x79, 0x66, 0x55, 0xb4, 0x58, 0x97, 0xeb, 0x27, 0xc9, 0xf0, + 0xf7, 0x26, 0xa0, 0xeb, 0xc3, 0xf7, 0xad, 0x4f, 0xf6, 0x19, 0x74, 0x13, 0xee, 0x30, 0x6e, 0x87, + 0x24, 0x49, 0x1c, 0x8f, 0xc8, 0xbe, 0x16, 0x33, 0xb6, 0x66, 0xc0, 0xab, 0x42, 0x7c, 0x52, 0x4a, + 0xe8, 0x73, 0x58, 0x4b, 0x52, 0xd7, 0x2d, 0x92, 0xae, 0x3c, 0xcb, 0xae, 0xdf, 0xc8, 0x33, 0x6b, + 0xd1, 0x84, 0x0d, 0xa9, 0x50, 0xde, 0x87, 0xb0, 0x3d, 0x71, 0xfc, 0x20, 0x65, 0x44, 0x41, 0xec, + 0x98, 0x91, 0x89, 0xff, 0xa3, 0x9c, 0x1e, 0x83, 0x3c, 0xb3, 0xde, 0x80, 0xc0, 0x9b, 0x52, 0x2f, + 0xb9, 0x0e, 0x85, 0xf6, 0x86, 0xb1, 0x32, 0xc4, 0xa0, 0xcf, 0x1f, 0x90, 0xb7, 0xce, 0x4d, 0x9d, + 0xb3, 0xb9, 0xc8, 0xf9, 0x1d, 0x18, 0xf5, 0xa7, 0x04, 0xed, 0xc1, 0xb2, 0xfc, 0xbf, 0xe9, 0x6b, + 0xe2, 0x6a, 0x2d, 0x32, 0x2b, 0xf3, 0x4d, 0xd4, 0x2f, 0x60, 0xb5, 0xfa, 0x9c, 0xbc, 0x3b, 0xe2, + 0xef, 
0xa1, 0x77, 0xed, 0x45, 0x79, 0x77, 0xec, 0xa7, 0x60, 0xd4, 0x07, 0x01, 0xea, 0x43, 0x27, + 0xa2, 0x13, 0x3f, 0x20, 0x22, 0xd5, 0xed, 0x87, 0x0d, 0x2c, 0x65, 0x34, 0x80, 0xa5, 0x28, 0x66, + 0xd4, 0x15, 0x2c, 0xed, 0x51, 0xb3, 0xaf, 0x3d, 0xd4, 0x70, 0xa9, 0x1a, 0xf5, 0x60, 0x8d, 0xc6, + 0x45, 0x44, 0x27, 0xb0, 0x4b, 0xf8, 0x68, 0x1d, 0x8c, 0x2b, 0x95, 0x00, 0x7d, 0x7a, 0x7e, 0x61, + 0x36, 0x5e, 0x5e, 0x98, 0x8d, 0x57, 0x17, 0xa6, 0xf6, 0xd3, 0xcc, 0xd4, 0xfe, 0x98, 0x99, 0xda, + 0x5f, 0x33, 0x53, 0x3b, 0x9f, 0x99, 0xda, 0x3f, 0x33, 0x53, 0xfb, 0x77, 0x66, 0x36, 0x5e, 0xcd, + 0x4c, 0xed, 0x97, 0x4b, 0xb3, 0x71, 0x7e, 0x69, 0x36, 0x5e, 0x5e, 0x9a, 0x8d, 0xe3, 0x8e, 0xf8, + 0x49, 0xfc, 0xe4, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x26, 0xe4, 0x44, 0x5a, 0x89, 0x0a, 0x00, + 0x00, +} + +func (this *Action) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Action) + if !ok { + that2, ok := that.(Action) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DownloadAction.Equal(that1.DownloadAction) { + return false + } + if !this.UploadAction.Equal(that1.UploadAction) { + return false + } + if !this.RunAction.Equal(that1.RunAction) { + return false + } + if !this.TimeoutAction.Equal(that1.TimeoutAction) { + return false + } + if !this.EmitProgressAction.Equal(that1.EmitProgressAction) { + return false + } + if !this.TryAction.Equal(that1.TryAction) { + return false + } + if !this.ParallelAction.Equal(that1.ParallelAction) { + return false + } + if !this.SerialAction.Equal(that1.SerialAction) { + return false + } + if !this.CodependentAction.Equal(that1.CodependentAction) { + return false + } + return true +} +func (this *DownloadAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DownloadAction) + if !ok { + that2, ok := that.(DownloadAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Artifact != that1.Artifact { + return false + } + if this.From != that1.From { + return false + } + if this.To != that1.To { + return false + } + if this.CacheKey != that1.CacheKey { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.User != that1.User { + return false + } + if this.ChecksumAlgorithm != that1.ChecksumAlgorithm { + return false + } + if this.ChecksumValue != that1.ChecksumValue { + return false + } + return true +} +func (this *UploadAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UploadAction) + if !ok { + that2, ok := that.(UploadAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Artifact != that1.Artifact { + return false + } + if this.From != that1.From { + return false + } + if this.To != that1.To { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.User != that1.User { + return false + } + return true +} +func (this *RunAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RunAction) + if !ok { + that2, ok := that.(RunAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Path 
!= that1.Path { + return false + } + if len(this.Args) != len(that1.Args) { + return false + } + for i := range this.Args { + if this.Args[i] != that1.Args[i] { + return false + } + } + if this.Dir != that1.Dir { + return false + } + if len(this.Env) != len(that1.Env) { + return false + } + for i := range this.Env { + if !this.Env[i].Equal(that1.Env[i]) { + return false + } + } + if !this.ResourceLimits.Equal(that1.ResourceLimits) { + return false + } + if this.User != that1.User { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.SuppressLogOutput != that1.SuppressLogOutput { + return false + } + return true +} +func (this *TimeoutAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeoutAction) + if !ok { + that2, ok := that.(TimeoutAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.DeprecatedTimeoutNs != that1.DeprecatedTimeoutNs { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.TimeoutMs != that1.TimeoutMs { + return false + } + return true +} +func (this *EmitProgressAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EmitProgressAction) + if !ok { + that2, ok := that.(EmitProgressAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.StartMessage != that1.StartMessage { + return false + } + if this.SuccessMessage != that1.SuccessMessage { + return false + } + if this.FailureMessagePrefix != that1.FailureMessagePrefix { + return false + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *TryAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TryAction) + if !ok { + that2, ok := that.(TryAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *ParallelAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ParallelAction) + if !ok { + that2, ok := that.(ParallelAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Actions) != len(that1.Actions) { + return false + } + for i := range this.Actions { + if !this.Actions[i].Equal(that1.Actions[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *SerialAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SerialAction) + if !ok { + that2, ok := that.(SerialAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Actions) != len(that1.Actions) { + return false + } + for i := range this.Actions { + if !this.Actions[i].Equal(that1.Actions[i]) { + return false + } + } + if this.LogSource != 
that1.LogSource { + return false + } + return true +} +func (this *CodependentAction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CodependentAction) + if !ok { + that2, ok := that.(CodependentAction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Actions) != len(that1.Actions) { + return false + } + for i := range this.Actions { + if !this.Actions[i].Equal(that1.Actions[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + return true +} +func (this *ResourceLimits) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceLimits) + if !ok { + that2, ok := that.(ResourceLimits) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.OptionalNofile == nil { + if this.OptionalNofile != nil { + return false + } + } else if this.OptionalNofile == nil { + return false + } else if !this.OptionalNofile.Equal(that1.OptionalNofile) { + return false + } + if that1.OptionalNproc == nil { + if this.OptionalNproc != nil { + return false + } + } else if this.OptionalNproc == nil { + return false + } else if !this.OptionalNproc.Equal(that1.OptionalNproc) { + return false + } + return true +} +func (this *ResourceLimits_Nofile) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceLimits_Nofile) + if !ok { + that2, ok := that.(ResourceLimits_Nofile) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Nofile != that1.Nofile { + return false + } + return true +} +func (this *ResourceLimits_Nproc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceLimits_Nproc) + if !ok { + that2, ok := that.(ResourceLimits_Nproc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Nproc != that1.Nproc { + return false + } + return true +} +func (this *Action) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&models.Action{") + if this.DownloadAction != nil { + s = append(s, "DownloadAction: "+fmt.Sprintf("%#v", this.DownloadAction)+",\n") + } + if this.UploadAction != nil { + s = append(s, "UploadAction: "+fmt.Sprintf("%#v", this.UploadAction)+",\n") + } + if this.RunAction != nil { + s = append(s, "RunAction: "+fmt.Sprintf("%#v", this.RunAction)+",\n") + } + if this.TimeoutAction != nil { + s = append(s, "TimeoutAction: "+fmt.Sprintf("%#v", this.TimeoutAction)+",\n") + } + if this.EmitProgressAction != nil { + s = append(s, "EmitProgressAction: "+fmt.Sprintf("%#v", this.EmitProgressAction)+",\n") + } + if this.TryAction != nil { + s = append(s, "TryAction: "+fmt.Sprintf("%#v", this.TryAction)+",\n") + } + if this.ParallelAction != nil { + s = append(s, "ParallelAction: "+fmt.Sprintf("%#v", this.ParallelAction)+",\n") + } + if this.SerialAction != nil { + s = append(s, "SerialAction: "+fmt.Sprintf("%#v", this.SerialAction)+",\n") + } + if this.CodependentAction != nil { + s = append(s, "CodependentAction: "+fmt.Sprintf("%#v", this.CodependentAction)+",\n") + } + s = append(s, "}") + return strings.Join(s, 
"") +} +func (this *DownloadAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&models.DownloadAction{") + s = append(s, "Artifact: "+fmt.Sprintf("%#v", this.Artifact)+",\n") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "CacheKey: "+fmt.Sprintf("%#v", this.CacheKey)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "ChecksumAlgorithm: "+fmt.Sprintf("%#v", this.ChecksumAlgorithm)+",\n") + s = append(s, "ChecksumValue: "+fmt.Sprintf("%#v", this.ChecksumValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UploadAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.UploadAction{") + s = append(s, "Artifact: "+fmt.Sprintf("%#v", this.Artifact)+",\n") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RunAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&models.RunAction{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Args: "+fmt.Sprintf("%#v", this.Args)+",\n") + s = append(s, "Dir: "+fmt.Sprintf("%#v", this.Dir)+",\n") + if this.Env != nil { + s = append(s, "Env: "+fmt.Sprintf("%#v", this.Env)+",\n") + } + if this.ResourceLimits != nil { + s = append(s, "ResourceLimits: "+fmt.Sprintf("%#v", this.ResourceLimits)+",\n") + } + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "SuppressLogOutput: "+fmt.Sprintf("%#v", this.SuppressLogOutput)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeoutAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.TimeoutAction{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "DeprecatedTimeoutNs: "+fmt.Sprintf("%#v", this.DeprecatedTimeoutNs)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "TimeoutMs: "+fmt.Sprintf("%#v", this.TimeoutMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EmitProgressAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.EmitProgressAction{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "StartMessage: "+fmt.Sprintf("%#v", this.StartMessage)+",\n") + s = append(s, "SuccessMessage: "+fmt.Sprintf("%#v", this.SuccessMessage)+",\n") + s = append(s, "FailureMessagePrefix: "+fmt.Sprintf("%#v", this.FailureMessagePrefix)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TryAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TryAction{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + 
s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ParallelAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ParallelAction{") + if this.Actions != nil { + s = append(s, "Actions: "+fmt.Sprintf("%#v", this.Actions)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SerialAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.SerialAction{") + if this.Actions != nil { + s = append(s, "Actions: "+fmt.Sprintf("%#v", this.Actions)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CodependentAction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.CodependentAction{") + if this.Actions != nil { + s = append(s, "Actions: "+fmt.Sprintf("%#v", this.Actions)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceLimits) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ResourceLimits{") + if this.OptionalNofile != nil { + s = append(s, "OptionalNofile: "+fmt.Sprintf("%#v", this.OptionalNofile)+",\n") + } + if this.OptionalNproc != nil { + s = append(s, "OptionalNproc: "+fmt.Sprintf("%#v", this.OptionalNproc)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceLimits_Nofile) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ResourceLimits_Nofile{` + + `Nofile:` + fmt.Sprintf("%#v", this.Nofile) + `}`}, ", ") + return s +} +func (this *ResourceLimits_Nproc) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ResourceLimits_Nproc{` + + `Nproc:` + fmt.Sprintf("%#v", this.Nproc) + `}`}, ", ") + return s +} +func valueToGoStringActions(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Action) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Action) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Action) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CodependentAction != nil { + { + size, err := m.CodependentAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.SerialAction != nil { + { + size, err := m.SerialAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ParallelAction != nil { + { + size, err := m.ParallelAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x3a + } + if m.TryAction != nil { + { + size, err := m.TryAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.EmitProgressAction != nil { + { + size, err := m.EmitProgressAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.TimeoutAction != nil { + { + size, err := m.TimeoutAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RunAction != nil { + { + size, err := m.RunAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UploadAction != nil { + { + size, err := m.UploadAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.DownloadAction != nil { + { + size, err := m.DownloadAction.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownloadAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownloadAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownloadAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChecksumValue) > 0 { + i -= len(m.ChecksumValue) + copy(dAtA[i:], m.ChecksumValue) + i = encodeVarintActions(dAtA, i, uint64(len(m.ChecksumValue))) + i-- + dAtA[i] = 0x42 + } + if len(m.ChecksumAlgorithm) > 0 { + i -= len(m.ChecksumAlgorithm) + copy(dAtA[i:], m.ChecksumAlgorithm) + i = encodeVarintActions(dAtA, i, uint64(len(m.ChecksumAlgorithm))) + i-- + dAtA[i] = 0x3a + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintActions(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x32 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x2a + } + if len(m.CacheKey) > 0 { + i -= len(m.CacheKey) + copy(dAtA[i:], m.CacheKey) + i = encodeVarintActions(dAtA, i, uint64(len(m.CacheKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.To) > 0 { + i -= len(m.To) + copy(dAtA[i:], m.To) + i = encodeVarintActions(dAtA, i, uint64(len(m.To))) + i-- + dAtA[i] = 0x1a + } + if len(m.From) > 0 { + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintActions(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x12 + } + if len(m.Artifact) > 0 { + i -= len(m.Artifact) + copy(dAtA[i:], m.Artifact) + i = encodeVarintActions(dAtA, i, uint64(len(m.Artifact))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UploadAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UploadAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UploadAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintActions(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x2a + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x22 + } + if len(m.To) > 0 { + i -= len(m.To) + copy(dAtA[i:], m.To) + i = encodeVarintActions(dAtA, i, uint64(len(m.To))) + i-- + dAtA[i] = 0x1a + } + if len(m.From) > 0 { + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintActions(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x12 + } + if len(m.Artifact) > 0 { + i -= len(m.Artifact) + copy(dAtA[i:], m.Artifact) + i = encodeVarintActions(dAtA, i, uint64(len(m.Artifact))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SuppressLogOutput { + i-- + if m.SuppressLogOutput { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x3a + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintActions(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x32 + } + if m.ResourceLimits != nil { + { + size, err := m.ResourceLimits.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Dir) > 0 { + i -= len(m.Dir) + copy(dAtA[i:], m.Dir) + i = encodeVarintActions(dAtA, i, uint64(len(m.Dir))) + i-- + dAtA[i] = 0x1a + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintActions(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintActions(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TimeoutAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeoutAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeoutAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutMs != 0 { + i = encodeVarintActions(dAtA, i, uint64(m.TimeoutMs)) + i-- + dAtA[i] = 0x20 + } + if len(m.LogSource) > 
0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x1a + } + if m.DeprecatedTimeoutNs != 0 { + i = encodeVarintActions(dAtA, i, uint64(m.DeprecatedTimeoutNs)) + i-- + dAtA[i] = 0x10 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EmitProgressAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmitProgressAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmitProgressAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x2a + } + if len(m.FailureMessagePrefix) > 0 { + i -= len(m.FailureMessagePrefix) + copy(dAtA[i:], m.FailureMessagePrefix) + i = encodeVarintActions(dAtA, i, uint64(len(m.FailureMessagePrefix))) + i-- + dAtA[i] = 0x22 + } + if len(m.SuccessMessage) > 0 { + i -= len(m.SuccessMessage) + copy(dAtA[i:], m.SuccessMessage) + i = encodeVarintActions(dAtA, i, uint64(len(m.SuccessMessage))) + i-- + dAtA[i] = 0x1a + } + if len(m.StartMessage) > 0 { + i -= len(m.StartMessage) + copy(dAtA[i:], m.StartMessage) + i = encodeVarintActions(dAtA, i, uint64(len(m.StartMessage))) + i-- + dAtA[i] = 0x12 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TryAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TryAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TryAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParallelAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParallelAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParallelAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Actions) 
> 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SerialAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerialAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerialAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CodependentAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CodependentAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CodependentAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintActions(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActions(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceLimits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceLimits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OptionalNproc != nil { + { + size := m.OptionalNproc.Size() + i -= size + if _, err := m.OptionalNproc.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.OptionalNofile != nil { + { + size := m.OptionalNofile.Size() + i -= size + if _, err := m.OptionalNofile.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceLimits_Nofile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLimits_Nofile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintActions(dAtA, i, uint64(m.Nofile)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *ResourceLimits_Nproc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceLimits_Nproc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintActions(dAtA, i, uint64(m.Nproc)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func encodeVarintActions(dAtA []byte, offset int, v uint64) int { + offset -= sovActions(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Action) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DownloadAction != nil { + l = m.DownloadAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.UploadAction != nil { + l = m.UploadAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.RunAction != nil { + l = m.RunAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.TimeoutAction != nil { + l = m.TimeoutAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.EmitProgressAction != nil { + l = m.EmitProgressAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.TryAction != nil { + l = m.TryAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.ParallelAction != nil { + l = m.ParallelAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.SerialAction != nil { + l = m.SerialAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.CodependentAction != nil { + l = m.CodependentAction.Size() + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *DownloadAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Artifact) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.From) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.To) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.CacheKey) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.ChecksumAlgorithm) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.ChecksumValue) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *UploadAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Artifact) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.From) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.To) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *RunAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.Dir) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + if m.ResourceLimits != nil { + l = m.ResourceLimits.Size() + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if m.SuppressLogOutput { + n += 2 + } + return n +} + +func (m *TimeoutAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + 
l = m.Action.Size() + n += 1 + l + sovActions(uint64(l)) + } + if m.DeprecatedTimeoutNs != 0 { + n += 1 + sovActions(uint64(m.DeprecatedTimeoutNs)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + if m.TimeoutMs != 0 { + n += 1 + sovActions(uint64(m.TimeoutMs)) + } + return n +} + +func (m *EmitProgressAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.StartMessage) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.SuccessMessage) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.FailureMessagePrefix) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *TryAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovActions(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *ParallelAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *SerialAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *CodependentAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovActions(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovActions(uint64(l)) + } + return n +} + +func (m *ResourceLimits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OptionalNofile != nil { + n += m.OptionalNofile.Size() + } + if m.OptionalNproc != nil { + n += m.OptionalNproc.Size() + } + return n +} + +func (m *ResourceLimits_Nofile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovActions(uint64(m.Nofile)) + return n +} +func (m *ResourceLimits_Nproc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovActions(uint64(m.Nproc)) + return n +} + +func sovActions(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozActions(x uint64) (n int) { + return sovActions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Action) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Action{`, + `DownloadAction:` + strings.Replace(this.DownloadAction.String(), "DownloadAction", "DownloadAction", 1) + `,`, + `UploadAction:` + strings.Replace(this.UploadAction.String(), "UploadAction", "UploadAction", 1) + `,`, + `RunAction:` + strings.Replace(this.RunAction.String(), "RunAction", "RunAction", 1) + `,`, + `TimeoutAction:` + strings.Replace(this.TimeoutAction.String(), "TimeoutAction", "TimeoutAction", 1) + `,`, + `EmitProgressAction:` + strings.Replace(this.EmitProgressAction.String(), "EmitProgressAction", "EmitProgressAction", 1) + `,`, + `TryAction:` + strings.Replace(this.TryAction.String(), "TryAction", "TryAction", 1) + `,`, + `ParallelAction:` + 
strings.Replace(this.ParallelAction.String(), "ParallelAction", "ParallelAction", 1) + `,`, + `SerialAction:` + strings.Replace(this.SerialAction.String(), "SerialAction", "SerialAction", 1) + `,`, + `CodependentAction:` + strings.Replace(this.CodependentAction.String(), "CodependentAction", "CodependentAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DownloadAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownloadAction{`, + `Artifact:` + fmt.Sprintf("%v", this.Artifact) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `CacheKey:` + fmt.Sprintf("%v", this.CacheKey) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `ChecksumAlgorithm:` + fmt.Sprintf("%v", this.ChecksumAlgorithm) + `,`, + `ChecksumValue:` + fmt.Sprintf("%v", this.ChecksumValue) + `,`, + `}`, + }, "") + return s +} +func (this *UploadAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UploadAction{`, + `Artifact:` + fmt.Sprintf("%v", this.Artifact) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `}`, + }, "") + return s +} +func (this *RunAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]*EnvironmentVariable{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(fmt.Sprintf("%v", f), "EnvironmentVariable", "EnvironmentVariable", 1) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&RunAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Dir:` + fmt.Sprintf("%v", this.Dir) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ResourceLimits:` + strings.Replace(this.ResourceLimits.String(), "ResourceLimits", "ResourceLimits", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `SuppressLogOutput:` + fmt.Sprintf("%v", this.SuppressLogOutput) + `,`, + `}`, + }, "") + return s +} +func (this *TimeoutAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeoutAction{`, + `Action:` + strings.Replace(this.Action.String(), "Action", "Action", 1) + `,`, + `DeprecatedTimeoutNs:` + fmt.Sprintf("%v", this.DeprecatedTimeoutNs) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `TimeoutMs:` + fmt.Sprintf("%v", this.TimeoutMs) + `,`, + `}`, + }, "") + return s +} +func (this *EmitProgressAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmitProgressAction{`, + `Action:` + strings.Replace(this.Action.String(), "Action", "Action", 1) + `,`, + `StartMessage:` + fmt.Sprintf("%v", this.StartMessage) + `,`, + `SuccessMessage:` + fmt.Sprintf("%v", this.SuccessMessage) + `,`, + `FailureMessagePrefix:` + fmt.Sprintf("%v", this.FailureMessagePrefix) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *TryAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TryAction{`, + `Action:` + strings.Replace(this.Action.String(), "Action", "Action", 1) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *ParallelAction) String() string { + if 
this == nil { + return "nil" + } + repeatedStringForActions := "[]*Action{" + for _, f := range this.Actions { + repeatedStringForActions += strings.Replace(f.String(), "Action", "Action", 1) + "," + } + repeatedStringForActions += "}" + s := strings.Join([]string{`&ParallelAction{`, + `Actions:` + repeatedStringForActions + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *SerialAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForActions := "[]*Action{" + for _, f := range this.Actions { + repeatedStringForActions += strings.Replace(f.String(), "Action", "Action", 1) + "," + } + repeatedStringForActions += "}" + s := strings.Join([]string{`&SerialAction{`, + `Actions:` + repeatedStringForActions + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *CodependentAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForActions := "[]*Action{" + for _, f := range this.Actions { + repeatedStringForActions += strings.Replace(f.String(), "Action", "Action", 1) + "," + } + repeatedStringForActions += "}" + s := strings.Join([]string{`&CodependentAction{`, + `Actions:` + repeatedStringForActions + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceLimits) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceLimits{`, + `OptionalNofile:` + fmt.Sprintf("%v", this.OptionalNofile) + `,`, + `OptionalNproc:` + fmt.Sprintf("%v", this.OptionalNproc) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceLimits_Nofile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceLimits_Nofile{`, + `Nofile:` + fmt.Sprintf("%v", this.Nofile) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceLimits_Nproc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceLimits_Nproc{`, + `Nproc:` + fmt.Sprintf("%v", this.Nproc) + `,`, + `}`, + }, "") + return s +} +func valueToStringActions(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Action) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Action: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Action: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownloadAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.DownloadAction == nil { + m.DownloadAction = &DownloadAction{} + } + if err := m.DownloadAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UploadAction == nil { + m.UploadAction = &UploadAction{} + } + if err := m.UploadAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAction == nil { + m.RunAction = &RunAction{} + } + if err := m.RunAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeoutAction == nil { + m.TimeoutAction = &TimeoutAction{} + } + if err := m.TimeoutAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmitProgressAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmitProgressAction == nil { + m.EmitProgressAction = &EmitProgressAction{} + } + if err := m.EmitProgressAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TryAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TryAction == nil { + m.TryAction = &TryAction{} + } + if err := m.TryAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParallelAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParallelAction == nil { + m.ParallelAction = &ParallelAction{} + } + if err := m.ParallelAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SerialAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SerialAction == nil { + m.SerialAction = &SerialAction{} + } + if err := m.SerialAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodependentAction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CodependentAction == nil { + m.CodependentAction = &CodependentAction{} + } + if err := m.CodependentAction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownloadAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownloadAction: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: DownloadAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifact = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CacheKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex 
+ case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumAlgorithm", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumAlgorithm = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UploadAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UploadAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UploadAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifact = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, &EnvironmentVariable{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceLimits == nil { + m.ResourceLimits = &ResourceLimits{} + } + if err := m.ResourceLimits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuppressLogOutput", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuppressLogOutput = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeoutAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeoutAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeoutAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedTimeoutNs", wireType) + } + m.DeprecatedTimeoutNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedTimeoutNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutMs", wireType) + } + m.TimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmitProgressAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmitProgressAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmitProgressAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SuccessMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureMessagePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureMessagePrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TryAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TryAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TryAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParallelAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParallelAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParallelAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &Action{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerialAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerialAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerialAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &Action{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CodependentAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CodependentAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CodependentAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &Action{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nofile", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalNofile = &ResourceLimits_Nofile{v} + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nproc", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowActions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalNproc = &ResourceLimits_Nproc{v} + default: + iNdEx = preIndex + skippy, err := skipActions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipActions(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthActions + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupActions + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthActions + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthActions = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowActions = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupActions = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/actions.proto b/vendor/code.cloudfoundry.org/bbs/models/actions.proto new file mode 100644 index 00000000..7b29cc45 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actions.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "environment_variables.proto"; + +message Action { + // Note: we only expect one of the following set of fields to be + // set. Previously we used `option (gogoproto.onlyone) = true' but since this + // is now deprecated and oneof introduces a lot of structural changes, we + // deferred on switching to oneof for now until there is a good reason for it. 
+ // disadvantages of using multiple optionals as opposed to oneof are: + // - less memory usage + // disadvantages of using multiple optionals without onlyone: + // - writing our own GetAction/SetAction methods + // action oneof { + DownloadAction download_action = 1 [(gogoproto.jsontag) = "download,omitempty"]; + UploadAction upload_action = 2 [(gogoproto.jsontag) = "upload,omitempty"]; + RunAction run_action = 3 [(gogoproto.jsontag) = "run,omitempty"]; + TimeoutAction timeout_action = 4 [(gogoproto.jsontag) = "timeout,omitempty"]; + EmitProgressAction emit_progress_action = 5 [(gogoproto.jsontag) = "emit_progress,omitempty"]; + TryAction try_action = 6 [(gogoproto.jsontag) = "try,omitempty"]; + ParallelAction parallel_action = 7 [(gogoproto.jsontag) = "parallel,omitempty"]; + SerialAction serial_action = 8 [(gogoproto.jsontag) = "serial,omitempty"]; + CodependentAction codependent_action = 9 [(gogoproto.jsontag) = "codependent,omitempty"]; + // } +} + +message DownloadAction { + string artifact = 1; + string from = 2 [(gogoproto.jsontag) = "from"]; + string to = 3 [(gogoproto.jsontag) = "to"]; + string cache_key = 4 [(gogoproto.jsontag) = "cache_key"]; + string log_source = 5; + string user = 6 [(gogoproto.jsontag) = "user"] ; + string checksum_algorithm = 7; + string checksum_value = 8; +} + +message UploadAction { + string artifact = 1; + string from = 2 [(gogoproto.jsontag) = "from"]; + string to = 3 [(gogoproto.jsontag) = "to"]; + string log_source = 4; + string user = 5 [(gogoproto.jsontag) = "user"]; +} + +message RunAction { + string path = 1 [(gogoproto.jsontag) = "path"]; + repeated string args = 2; + string dir = 3; + repeated EnvironmentVariable env = 4; + ResourceLimits resource_limits = 5; + string user = 6 [(gogoproto.jsontag) = "user"]; + string log_source = 7; + bool suppress_log_output = 8 [(gogoproto.jsontag) = "suppress_log_output"]; +} + +message TimeoutAction { + Action action = 1; + int64 deprecated_timeout_ns = 2 [(gogoproto.jsontag) = "timeout,omitempty", deprecated=true]; + string log_source = 3; + int64 timeout_ms = 4 [(gogoproto.jsontag) = "timeout_ms"]; +} + +message EmitProgressAction { + Action action = 1; + string start_message = 2 [(gogoproto.jsontag) = "start_message"]; + string success_message = 3 [(gogoproto.jsontag) = "success_message"]; + string failure_message_prefix = 4 [(gogoproto.jsontag) = "failure_message_prefix"]; + string log_source = 5; +} + +message TryAction { + Action action = 1; + string log_source = 2; +} + +message ParallelAction { + repeated Action actions = 1; + string log_source = 2; +} + +message SerialAction { + repeated Action actions = 1; + string log_source = 2; +} + +message CodependentAction { + repeated Action actions = 1; + string log_source = 2; +} + +message ResourceLimits { + oneof optional_nofile { + uint64 nofile = 1; + } + oneof optional_nproc { + uint64 nproc = 2 [deprecated=true]; + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go new file mode 100644 index 00000000..9ad655b5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.go @@ -0,0 +1,521 @@ +package models + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "code.cloudfoundry.org/bbs/format" +) + +const ( + ActualLRPStateUnclaimed = "UNCLAIMED" + ActualLRPStateClaimed = "CLAIMED" + ActualLRPStateRunning = "RUNNING" + ActualLRPStateCrashed = "CRASHED" + + CrashResetTimeout = 5 * time.Minute + RetireActualLRPRetryAttempts = 5 +) + +var 
ActualLRPStates = []string{ + ActualLRPStateUnclaimed, + ActualLRPStateClaimed, + ActualLRPStateRunning, + ActualLRPStateCrashed, +} + +// Deprecated: use the ActualLRPInstances API instead +type ActualLRPChange struct { + Before *ActualLRPGroup + After *ActualLRPGroup +} + +type ActualLRPFilter struct { + Domain string + CellID string + ProcessGuid string + Index *int32 +} + +func NewActualLRPKey(processGuid string, index int32, domain string) ActualLRPKey { + return ActualLRPKey{processGuid, index, domain} +} + +func NewActualLRPInstanceKey(instanceGuid string, cellId string) ActualLRPInstanceKey { + return ActualLRPInstanceKey{instanceGuid, cellId} +} + +func NewActualLRPNetInfo(address string, instanceAddress string, preferredAddress ActualLRPNetInfo_PreferredAddress, ports ...*PortMapping) ActualLRPNetInfo { + return ActualLRPNetInfo{address, ports, instanceAddress, preferredAddress} +} + +func EmptyActualLRPNetInfo() ActualLRPNetInfo { + return NewActualLRPNetInfo("", "", ActualLRPNetInfo_PreferredAddressUnknown) +} + +func (info ActualLRPNetInfo) Empty() bool { + return info.Address == "" && len(info.Ports) == 0 && info.PreferredAddress == ActualLRPNetInfo_PreferredAddressUnknown +} + +func (*ActualLRPNetInfo) Version() format.Version { + return format.V0 +} + +func (d *ActualLRPNetInfo_PreferredAddress) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ActualLRPNetInfo_PreferredAddress_value[name]; found { + *d = ActualLRPNetInfo_PreferredAddress(v) + return nil + } + return fmt.Errorf("invalid preferred address: %s", name) +} + +func (d ActualLRPNetInfo_PreferredAddress) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +func NewPortMapping(hostPort, containerPort uint32) *PortMapping { + return &PortMapping{ + HostPort: hostPort, + ContainerPort: containerPort, + } +} + +func NewPortMappingWithTLSProxy(hostPort, containerPort, tlsHost, tlsContainer uint32) *PortMapping { + return &PortMapping{ + HostPort: hostPort, + ContainerPort: containerPort, + ContainerTlsProxyPort: tlsContainer, + HostTlsProxyPort: tlsHost, + } +} + +func (key ActualLRPInstanceKey) Empty() bool { + return key.InstanceGuid == "" && key.CellId == "" +} +func (a *ActualLRP) Copy() *ActualLRP { + newActualLRP := *a + return &newActualLRP +} + +const StaleUnclaimedActualLRPDuration = 30 * time.Second + +func (actual ActualLRP) ShouldStartUnclaimed(now time.Time) bool { + if actual.State != ActualLRPStateUnclaimed { + return false + } + + if now.Sub(time.Unix(0, actual.Since)) > StaleUnclaimedActualLRPDuration { + return true + } + + return false +} + +func (actual ActualLRP) CellIsMissing(cellSet CellSet) bool { + if actual.State == ActualLRPStateUnclaimed || + actual.State == ActualLRPStateCrashed { + return false + } + + return !cellSet.HasCellID(actual.CellId) +} + +func (actual ActualLRP) ShouldRestartImmediately(calc RestartCalculator) bool { + if actual.State != ActualLRPStateCrashed { + return false + } + + return calc.ShouldRestart(0, 0, actual.CrashCount) +} + +func (actual ActualLRP) ShouldRestartCrash(now time.Time, calc RestartCalculator) bool { + if actual.State != ActualLRPStateCrashed { + return false + } + + return calc.ShouldRestart(now.UnixNano(), actual.Since, actual.CrashCount) +} + +func (actual *ActualLRP) SetRoutable(routable bool) { + actual.OptionalRoutable = &ActualLRP_Routable{ + Routable: routable, + } +} + +func (actual *ActualLRP) RoutableExists() bool { + _, ok := 
actual.GetOptionalRoutable().(*ActualLRP_Routable) + return ok +} + +func (before ActualLRP) AllowsTransitionTo(lrpKey *ActualLRPKey, instanceKey *ActualLRPInstanceKey, newState string) bool { + if !before.ActualLRPKey.Equal(lrpKey) { + return false + } + + var valid bool + switch before.State { + case ActualLRPStateUnclaimed: + valid = newState == ActualLRPStateUnclaimed || + newState == ActualLRPStateClaimed || + newState == ActualLRPStateRunning + case ActualLRPStateClaimed: + valid = newState == ActualLRPStateUnclaimed && instanceKey.Empty() || + newState == ActualLRPStateClaimed && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateRunning || + newState == ActualLRPStateCrashed && before.ActualLRPInstanceKey.Equal(instanceKey) + case ActualLRPStateRunning: + valid = newState == ActualLRPStateUnclaimed && instanceKey.Empty() || + newState == ActualLRPStateClaimed && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateRunning && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateCrashed && before.ActualLRPInstanceKey.Equal(instanceKey) + case ActualLRPStateCrashed: + valid = newState == ActualLRPStateUnclaimed && instanceKey.Empty() || + newState == ActualLRPStateClaimed && before.ActualLRPInstanceKey.Equal(instanceKey) || + newState == ActualLRPStateRunning && before.ActualLRPInstanceKey.Equal(instanceKey) + } + + return valid +} + +func (d *ActualLRP_Presence) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ActualLRP_Presence_value[name]; found { + *d = ActualLRP_Presence(v) + return nil + } + return fmt.Errorf("invalid presence: %s", name) +} + +func (d ActualLRP_Presence) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// Deprecated: use the ActualLRPInstances API instead +func NewRunningActualLRPGroup(actualLRP *ActualLRP) *ActualLRPGroup { + return &ActualLRPGroup{ + Instance: actualLRP, + } +} + +// Deprecated: use the ActualLRPInstances API instead +func NewEvacuatingActualLRPGroup(actualLRP *ActualLRP) *ActualLRPGroup { + return &ActualLRPGroup{ + Evacuating: actualLRP, + } +} + +// Deprecated: use the ActualLRPInstances API instead +func (group ActualLRPGroup) Resolve() (*ActualLRP, bool, error) { + switch { + case group.Instance == nil && group.Evacuating == nil: + return nil, false, ErrActualLRPGroupInvalid + + case group.Instance == nil: + return group.Evacuating, true, nil + + case group.Evacuating == nil: + return group.Instance, false, nil + + case group.Instance.State == ActualLRPStateRunning || group.Instance.State == ActualLRPStateCrashed: + return group.Instance, false, nil + + default: + return group.Evacuating, true, nil + } +} + +func NewUnclaimedActualLRP(lrpKey ActualLRPKey, since int64) *ActualLRP { + return &ActualLRP{ + ActualLRPKey: lrpKey, + State: ActualLRPStateUnclaimed, + Since: since, + } +} + +func NewClaimedActualLRP(lrpKey ActualLRPKey, instanceKey ActualLRPInstanceKey, since int64) *ActualLRP { + return &ActualLRP{ + ActualLRPKey: lrpKey, + ActualLRPInstanceKey: instanceKey, + State: ActualLRPStateClaimed, + Since: since, + } +} + +func NewRunningActualLRP(lrpKey ActualLRPKey, instanceKey ActualLRPInstanceKey, netInfo ActualLRPNetInfo, since int64) *ActualLRP { + return &ActualLRP{ + ActualLRPKey: lrpKey, + ActualLRPInstanceKey: instanceKey, + ActualLRPNetInfo: netInfo, + State: ActualLRPStateRunning, + Since: since, + } +} + +func (*ActualLRP) Version() 
format.Version { + return format.V0 +} + +func (actualLRPInfo *ActualLRPInfo) ToActualLRP(lrpKey ActualLRPKey, lrpInstanceKey ActualLRPInstanceKey) *ActualLRP { + if actualLRPInfo == nil { + return nil + } + lrp := ActualLRP{ + ActualLRPKey: lrpKey, + ActualLRPInstanceKey: lrpInstanceKey, + ActualLRPNetInfo: actualLRPInfo.ActualLRPNetInfo, + AvailabilityZone: actualLRPInfo.AvailabilityZone, + CrashCount: actualLRPInfo.CrashCount, + CrashReason: actualLRPInfo.CrashReason, + State: actualLRPInfo.State, + PlacementError: actualLRPInfo.PlacementError, + Since: actualLRPInfo.Since, + ModificationTag: actualLRPInfo.ModificationTag, + Presence: actualLRPInfo.Presence, + } + + if actualLRPInfo.RoutableExists() { + lrp.SetRoutable(actualLRPInfo.GetRoutable()) + } + + return &lrp +} + +func (actual *ActualLRP) ToActualLRPInfo() *ActualLRPInfo { + if actual == nil { + return nil + } + info := ActualLRPInfo{ + ActualLRPNetInfo: actual.ActualLRPNetInfo, + AvailabilityZone: actual.AvailabilityZone, + CrashCount: actual.CrashCount, + CrashReason: actual.CrashReason, + State: actual.State, + PlacementError: actual.PlacementError, + Since: actual.Since, + ModificationTag: actual.ModificationTag, + Presence: actual.Presence, + } + + if actual.RoutableExists() { + info.SetRoutable(actual.GetRoutable()) + } + return &info +} + +// Deprecated: use the ActualLRPInstances API instead +func (actual *ActualLRP) ToActualLRPGroup() *ActualLRPGroup { + if actual == nil { + return nil + } + + switch actual.Presence { + case ActualLRP_Evacuating: + return &ActualLRPGroup{Evacuating: actual} + default: + return &ActualLRPGroup{Instance: actual} + } +} + +func (actual ActualLRP) Validate() error { + var validationError ValidationError + + err := actual.ActualLRPKey.Validate() + if err != nil { + validationError = validationError.Append(err) + } + + if actual.Since == 0 { + validationError = validationError.Append(ErrInvalidField{"since"}) + } + + switch actual.State { + case ActualLRPStateUnclaimed: + if !actual.ActualLRPInstanceKey.Empty() { + validationError = validationError.Append(errors.New("instance key cannot be set when state is unclaimed")) + } + if !actual.ActualLRPNetInfo.Empty() { + validationError = validationError.Append(errors.New("net info cannot be set when state is unclaimed")) + } + if actual.Presence != ActualLRP_Ordinary { + validationError = validationError.Append(errors.New("presence cannot be set when state is unclaimed")) + } + + case ActualLRPStateClaimed: + if err := actual.ActualLRPInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + if !actual.ActualLRPNetInfo.Empty() { + validationError = validationError.Append(errors.New("net info cannot be set when state is claimed")) + } + if strings.TrimSpace(actual.PlacementError) != "" { + validationError = validationError.Append(errors.New("placement error cannot be set when state is claimed")) + } + + case ActualLRPStateRunning: + if err := actual.ActualLRPInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + if err := actual.ActualLRPNetInfo.Validate(); err != nil { + validationError = validationError.Append(err) + } + if strings.TrimSpace(actual.PlacementError) != "" { + validationError = validationError.Append(errors.New("placement error cannot be set when state is running")) + } + + case ActualLRPStateCrashed: + if !actual.ActualLRPInstanceKey.Empty() { + validationError = validationError.Append(errors.New("instance key cannot be set when state is crashed")) + } + if 
!actual.ActualLRPNetInfo.Empty() { + validationError = validationError.Append(errors.New("net info cannot be set when state is crashed")) + } + if strings.TrimSpace(actual.PlacementError) != "" { + validationError = validationError.Append(errors.New("placement error cannot be set when state is crashed")) + } + + default: + validationError = validationError.Append(ErrInvalidField{"state"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (key *ActualLRPKey) Validate() error { + var validationError ValidationError + + if key.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if key.Index < 0 { + validationError = validationError.Append(ErrInvalidField{"index"}) + } + + if key.Domain == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (key *ActualLRPNetInfo) Validate() error { + var validationError ValidationError + + if key.Address == "" { + return validationError.Append(ErrInvalidField{"address"}) + } + + return nil +} + +func (key *ActualLRPInstanceKey) Validate() error { + var validationError ValidationError + + if key.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if key.InstanceGuid == "" { + validationError = validationError.Append(ErrInvalidField{"instance_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +// hasHigherPriority returns true if lrp1 takes precendence over lrp2 +func hasHigherPriority(lrp1, lrp2 *ActualLRP) bool { + if lrp1 == nil { + return false + } + + if lrp2 == nil { + return true + } + + if lrp1.Presence == ActualLRP_Ordinary { + switch lrp1.State { + case ActualLRPStateRunning: + return true + case ActualLRPStateClaimed: + return lrp2.State != ActualLRPStateRunning && lrp2.State != ActualLRPStateClaimed + } + } else if lrp1.Presence == ActualLRP_Suspect { + switch lrp1.State { + case ActualLRPStateRunning: + return lrp2.State != ActualLRPStateRunning + case ActualLRPStateClaimed: + return lrp2.State != ActualLRPStateRunning + } + } + // Cases where we are comparing two LRPs with the same presence have undefined behavior since it shouldn't happen + // with the way they're stored in the database + return false +} + +// ResolveActualLRPGroups convert the given set of lrp instances into +// ActualLRPGroup. This conversion is lossy. A suspect LRP is given +// precendence over an Ordinary instance if it is Running. Otherwise, the +// Ordinary instance is returned in the Instance field of the ActualLRPGroup. +// Deprecated: use the ActualLRPInstances API instead +func ResolveActualLRPGroups(lrps []*ActualLRP) []*ActualLRPGroup { + mapOfGroups := map[ActualLRPKey]*ActualLRPGroup{} + result := []*ActualLRPGroup{} + for _, actualLRP := range lrps { + // Every actual LRP has potentially 2 rows in the database: one for the instance + // one for the evacuating. When building the list of actual LRP groups (where + // a group is the instance and corresponding evacuating), make sure we don't add the same + // actual lrp twice. 
+ if mapOfGroups[actualLRP.ActualLRPKey] == nil { + mapOfGroups[actualLRP.ActualLRPKey] = &ActualLRPGroup{} + result = append(result, mapOfGroups[actualLRP.ActualLRPKey]) + } + if actualLRP.Presence == ActualLRP_Evacuating { + mapOfGroups[actualLRP.ActualLRPKey].Evacuating = actualLRP + } else if hasHigherPriority(actualLRP, mapOfGroups[actualLRP.ActualLRPKey].Instance) { + mapOfGroups[actualLRP.ActualLRPKey].Instance = actualLRP + } + } + + return result +} + +// ResolveToActualLRPGroup calls ResolveActualLRPGroups and return the first +// LRP group. It panics if there are more than one group. If there no LRP +// groups were returned by ResolveActualLRPGroups, then an empty ActualLRPGroup +// is returned. +// Deprecated: use the ActualLRPInstances API instead +func ResolveActualLRPGroup(lrps []*ActualLRP) *ActualLRPGroup { + actualLRPGroups := ResolveActualLRPGroups(lrps) + switch len(actualLRPGroups) { + case 0: + return &ActualLRPGroup{} + case 1: + return actualLRPGroups[0] + default: + panic("shouldn't get here") + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go new file mode 100644 index 00000000..e9c46897 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.pb.go @@ -0,0 +1,3220 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: actual_lrp.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ActualLRPNetInfo_PreferredAddress int32 + +const ( + ActualLRPNetInfo_PreferredAddressUnknown ActualLRPNetInfo_PreferredAddress = 0 + ActualLRPNetInfo_PreferredAddressInstance ActualLRPNetInfo_PreferredAddress = 1 + ActualLRPNetInfo_PreferredAddressHost ActualLRPNetInfo_PreferredAddress = 2 +) + +var ActualLRPNetInfo_PreferredAddress_name = map[int32]string{ + 0: "UNKNOWN", + 1: "INSTANCE", + 2: "HOST", +} + +var ActualLRPNetInfo_PreferredAddress_value = map[string]int32{ + "UNKNOWN": 0, + "INSTANCE": 1, + "HOST": 2, +} + +func (ActualLRPNetInfo_PreferredAddress) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{4, 0} +} + +type ActualLRP_Presence int32 + +const ( + ActualLRP_Ordinary ActualLRP_Presence = 0 + ActualLRP_Evacuating ActualLRP_Presence = 1 + ActualLRP_Suspect ActualLRP_Presence = 2 +) + +var ActualLRP_Presence_name = map[int32]string{ + 0: "ORDINARY", + 1: "EVACUATING", + 2: "SUSPECT", +} + +var ActualLRP_Presence_value = map[string]int32{ + "ORDINARY": 0, + "EVACUATING": 1, + "SUSPECT": 2, +} + +func (ActualLRP_Presence) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{6, 0} +} + +// Deprecated: Do not use. 
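[Editorial note — not part of the vendored sources] The helpers above (hasHigherPriority, ResolveActualLRPGroups, ResolveActualLRPGroup) collapse per-instance ActualLRP records back into the deprecated ActualLRPGroup shape: Evacuating records go into the Evacuating slot, and among the rest a RUNNING Suspect instance wins over a merely CLAIMED Ordinary one. A small sketch of how a caller might exercise them, using only constructors defined in actual_lrp.go above (the guids, cell IDs, ports and addresses are placeholder values):

package main

import (
	"fmt"

	"code.cloudfoundry.org/bbs/models"
)

func main() {
	key := models.NewActualLRPKey("process-guid", 0, "cf-apps")

	// An Ordinary instance that is still CLAIMED on cell-1...
	claimed := models.NewClaimedActualLRP(key,
		models.NewActualLRPInstanceKey("instance-1", "cell-1"), 1)

	// ...and a Suspect instance that is already RUNNING on cell-2.
	running := models.NewRunningActualLRP(key,
		models.NewActualLRPInstanceKey("instance-2", "cell-2"),
		models.NewActualLRPNetInfo("10.0.0.1", "10.255.0.2",
			models.ActualLRPNetInfo_PreferredAddressHost,
			models.NewPortMapping(61000, 8080)),
		2)
	running.Presence = models.ActualLRP_Suspect

	// hasHigherPriority prefers the running Suspect record, so it lands in the
	// group's Instance slot and the claimed Ordinary record is dropped.
	group := models.ResolveActualLRPGroup([]*models.ActualLRP{claimed, running})
	fmt.Println(group.Instance.InstanceGuid, group.Instance.State) // instance-2 RUNNING
}

Both resolver functions are marked Deprecated in favour of the ActualLRPInstances API, so the group view should be treated purely as a compatibility shim.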
+type ActualLRPGroup struct { + Instance *ActualLRP `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"` + Evacuating *ActualLRP `protobuf:"bytes,2,opt,name=evacuating,proto3" json:"evacuating,omitempty"` +} + +func (m *ActualLRPGroup) Reset() { *m = ActualLRPGroup{} } +func (*ActualLRPGroup) ProtoMessage() {} +func (*ActualLRPGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{0} +} +func (m *ActualLRPGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroup.Merge(m, src) +} +func (m *ActualLRPGroup) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroup) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroup proto.InternalMessageInfo + +func (m *ActualLRPGroup) GetInstance() *ActualLRP { + if m != nil { + return m.Instance + } + return nil +} + +func (m *ActualLRPGroup) GetEvacuating() *ActualLRP { + if m != nil { + return m.Evacuating + } + return nil +} + +type PortMapping struct { + ContainerPort uint32 `protobuf:"varint,1,opt,name=container_port,json=containerPort,proto3" json:"container_port"` + HostPort uint32 `protobuf:"varint,2,opt,name=host_port,json=hostPort,proto3" json:"host_port"` + ContainerTlsProxyPort uint32 `protobuf:"varint,3,opt,name=container_tls_proxy_port,json=containerTlsProxyPort,proto3" json:"container_tls_proxy_port"` + HostTlsProxyPort uint32 `protobuf:"varint,4,opt,name=host_tls_proxy_port,json=hostTlsProxyPort,proto3" json:"host_tls_proxy_port"` +} + +func (m *PortMapping) Reset() { *m = PortMapping{} } +func (*PortMapping) ProtoMessage() {} +func (*PortMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{1} +} +func (m *PortMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortMapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortMapping.Merge(m, src) +} +func (m *PortMapping) XXX_Size() int { + return m.Size() +} +func (m *PortMapping) XXX_DiscardUnknown() { + xxx_messageInfo_PortMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_PortMapping proto.InternalMessageInfo + +func (m *PortMapping) GetContainerPort() uint32 { + if m != nil { + return m.ContainerPort + } + return 0 +} + +func (m *PortMapping) GetHostPort() uint32 { + if m != nil { + return m.HostPort + } + return 0 +} + +func (m *PortMapping) GetContainerTlsProxyPort() uint32 { + if m != nil { + return m.ContainerTlsProxyPort + } + return 0 +} + +func (m *PortMapping) GetHostTlsProxyPort() uint32 { + if m != nil { + return m.HostTlsProxyPort + } + return 0 +} + +type ActualLRPKey struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` + Domain string 
`protobuf:"bytes,3,opt,name=domain,proto3" json:"domain"` +} + +func (m *ActualLRPKey) Reset() { *m = ActualLRPKey{} } +func (*ActualLRPKey) ProtoMessage() {} +func (*ActualLRPKey) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{2} +} +func (m *ActualLRPKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPKey.Merge(m, src) +} +func (m *ActualLRPKey) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPKey) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPKey.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPKey proto.InternalMessageInfo + +func (m *ActualLRPKey) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ActualLRPKey) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ActualLRPKey) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +type ActualLRPInstanceKey struct { + InstanceGuid string `protobuf:"bytes,1,opt,name=instance_guid,json=instanceGuid,proto3" json:"instance_guid"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *ActualLRPInstanceKey) Reset() { *m = ActualLRPInstanceKey{} } +func (*ActualLRPInstanceKey) ProtoMessage() {} +func (*ActualLRPInstanceKey) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{3} +} +func (m *ActualLRPInstanceKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceKey.Merge(m, src) +} +func (m *ActualLRPInstanceKey) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceKey) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceKey.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceKey proto.InternalMessageInfo + +func (m *ActualLRPInstanceKey) GetInstanceGuid() string { + if m != nil { + return m.InstanceGuid + } + return "" +} + +func (m *ActualLRPInstanceKey) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type ActualLRPNetInfo struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address"` + Ports []*PortMapping `protobuf:"bytes,2,rep,name=ports,proto3" json:"ports"` + InstanceAddress string `protobuf:"bytes,3,opt,name=instance_address,json=instanceAddress,proto3" json:"instance_address,omitempty"` + PreferredAddress ActualLRPNetInfo_PreferredAddress `protobuf:"varint,4,opt,name=preferred_address,json=preferredAddress,proto3,enum=models.ActualLRPNetInfo_PreferredAddress" json:"preferred_address"` +} + +func (m *ActualLRPNetInfo) Reset() { *m = ActualLRPNetInfo{} } +func (*ActualLRPNetInfo) ProtoMessage() {} +func (*ActualLRPNetInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{4} +} +func (m 
*ActualLRPNetInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPNetInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPNetInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPNetInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPNetInfo.Merge(m, src) +} +func (m *ActualLRPNetInfo) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPNetInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPNetInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPNetInfo proto.InternalMessageInfo + +func (m *ActualLRPNetInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *ActualLRPNetInfo) GetPorts() []*PortMapping { + if m != nil { + return m.Ports + } + return nil +} + +func (m *ActualLRPNetInfo) GetInstanceAddress() string { + if m != nil { + return m.InstanceAddress + } + return "" +} + +func (m *ActualLRPNetInfo) GetPreferredAddress() ActualLRPNetInfo_PreferredAddress { + if m != nil { + return m.PreferredAddress + } + return ActualLRPNetInfo_PreferredAddressUnknown +} + +type ActualLRPInternalRoute struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname"` +} + +func (m *ActualLRPInternalRoute) Reset() { *m = ActualLRPInternalRoute{} } +func (*ActualLRPInternalRoute) ProtoMessage() {} +func (*ActualLRPInternalRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{5} +} +func (m *ActualLRPInternalRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInternalRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInternalRoute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInternalRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInternalRoute.Merge(m, src) +} +func (m *ActualLRPInternalRoute) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInternalRoute) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInternalRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInternalRoute proto.InternalMessageInfo + +func (m *ActualLRPInternalRoute) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +type ActualLRP struct { + ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3,embedded=actual_lrp_key" json:""` + ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3,embedded=actual_lrp_instance_key" json:""` + ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3,embedded=actual_lrp_net_info" json:""` + CrashCount int32 `protobuf:"varint,4,opt,name=crash_count,json=crashCount,proto3" json:"crash_count"` + CrashReason string `protobuf:"bytes,5,opt,name=crash_reason,json=crashReason,proto3" json:"crash_reason,omitempty"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state"` + PlacementError string `protobuf:"bytes,7,opt,name=placement_error,json=placementError,proto3" json:"placement_error,omitempty"` + Since int64 `protobuf:"varint,8,opt,name=since,proto3" json:"since"` + ModificationTag ModificationTag 
`protobuf:"bytes,9,opt,name=modification_tag,json=modificationTag,proto3" json:"modification_tag"` + Presence ActualLRP_Presence `protobuf:"varint,10,opt,name=presence,proto3,enum=models.ActualLRP_Presence" json:"presence"` + ActualLrpInternalRoutes []*ActualLRPInternalRoute `protobuf:"bytes,11,rep,name=actual_lrp_internal_routes,json=actualLrpInternalRoutes,proto3" json:"actual_lrp_internal_routes,omitempty"` + MetricTags map[string]string `protobuf:"bytes,12,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to OptionalRoutable: + // *ActualLRP_Routable + OptionalRoutable isActualLRP_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,14,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *ActualLRP) Reset() { *m = ActualLRP{} } +func (*ActualLRP) ProtoMessage() {} +func (*ActualLRP) Descriptor() ([]byte, []int) { + return fileDescriptor_25e5e77bfca46c1a, []int{6} +} +func (m *ActualLRP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRP.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRP) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRP.Merge(m, src) +} +func (m *ActualLRP) XXX_Size() int { + return m.Size() +} +func (m *ActualLRP) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRP.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRP proto.InternalMessageInfo + +type isActualLRP_OptionalRoutable interface { + isActualLRP_OptionalRoutable() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ActualLRP_Routable struct { + Routable bool `protobuf:"varint,13,opt,name=routable,proto3,oneof" json:"routable"` +} + +func (*ActualLRP_Routable) isActualLRP_OptionalRoutable() {} + +func (m *ActualLRP) GetOptionalRoutable() isActualLRP_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *ActualLRP) GetCrashCount() int32 { + if m != nil { + return m.CrashCount + } + return 0 +} + +func (m *ActualLRP) GetCrashReason() string { + if m != nil { + return m.CrashReason + } + return "" +} + +func (m *ActualLRP) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *ActualLRP) GetPlacementError() string { + if m != nil { + return m.PlacementError + } + return "" +} + +func (m *ActualLRP) GetSince() int64 { + if m != nil { + return m.Since + } + return 0 +} + +func (m *ActualLRP) GetModificationTag() ModificationTag { + if m != nil { + return m.ModificationTag + } + return ModificationTag{} +} + +func (m *ActualLRP) GetPresence() ActualLRP_Presence { + if m != nil { + return m.Presence + } + return ActualLRP_Ordinary +} + +func (m *ActualLRP) GetActualLrpInternalRoutes() []*ActualLRPInternalRoute { + if m != nil { + return m.ActualLrpInternalRoutes + } + return nil +} + +func (m *ActualLRP) GetMetricTags() map[string]string { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *ActualLRP) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*ActualLRP_Routable); ok { + return x.Routable + } + return false +} + +func (m *ActualLRP) GetAvailabilityZone() string { + 
if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ActualLRP) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ActualLRP_Routable)(nil), + } +} + +func init() { + proto.RegisterEnum("models.ActualLRPNetInfo_PreferredAddress", ActualLRPNetInfo_PreferredAddress_name, ActualLRPNetInfo_PreferredAddress_value) + proto.RegisterEnum("models.ActualLRP_Presence", ActualLRP_Presence_name, ActualLRP_Presence_value) + proto.RegisterType((*ActualLRPGroup)(nil), "models.ActualLRPGroup") + proto.RegisterType((*PortMapping)(nil), "models.PortMapping") + proto.RegisterType((*ActualLRPKey)(nil), "models.ActualLRPKey") + proto.RegisterType((*ActualLRPInstanceKey)(nil), "models.ActualLRPInstanceKey") + proto.RegisterType((*ActualLRPNetInfo)(nil), "models.ActualLRPNetInfo") + proto.RegisterType((*ActualLRPInternalRoute)(nil), "models.ActualLRPInternalRoute") + proto.RegisterType((*ActualLRP)(nil), "models.ActualLRP") + proto.RegisterMapType((map[string]string)(nil), "models.ActualLRP.MetricTagsEntry") +} + +func init() { proto.RegisterFile("actual_lrp.proto", fileDescriptor_25e5e77bfca46c1a) } + +var fileDescriptor_25e5e77bfca46c1a = []byte{ + // 1187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x56, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x16, 0xe5, 0xd8, 0x96, 0x46, 0xb2, 0x4c, 0xaf, 0x9d, 0x98, 0xd0, 0x1b, 0x90, 0x8a, 0xf0, + 0x16, 0x75, 0x02, 0xc4, 0x69, 0x9d, 0xa0, 0x68, 0x03, 0xf4, 0x60, 0x3a, 0x6e, 0x6c, 0x24, 0x91, + 0x8d, 0xb5, 0xdd, 0xa2, 0x1f, 0x80, 0xba, 0xa6, 0xd6, 0x0a, 0x11, 0x8a, 0x4b, 0x2c, 0x57, 0x6e, + 0xd4, 0x53, 0x8f, 0x85, 0xd1, 0x43, 0x81, 0x5e, 0x7a, 0xf1, 0xbd, 0xbf, 0xa1, 0xbf, 0x20, 0x47, + 0x1f, 0x73, 0x22, 0x1a, 0xe5, 0x52, 0xf0, 0x94, 0x9f, 0x50, 0xec, 0xf2, 0x23, 0xb4, 0x14, 0x9f, + 0x76, 0xf7, 0xd9, 0x99, 0x67, 0x86, 0x33, 0xcf, 0x8e, 0x04, 0x3a, 0x71, 0xc4, 0x90, 0x78, 0x5d, + 0x8f, 0x07, 0xeb, 0x01, 0x67, 0x82, 0xa1, 0xb9, 0x01, 0xeb, 0x51, 0x2f, 0x6c, 0xde, 0xed, 0xbb, + 0xe2, 0xf9, 0xf0, 0x78, 0xdd, 0x61, 0x83, 0x7b, 0x7d, 0xd6, 0x67, 0xf7, 0xd4, 0xf5, 0xf1, 0xf0, + 0x44, 0x9d, 0xd4, 0x41, 0xed, 0x12, 0xb7, 0xe6, 0x8d, 0x01, 0xeb, 0xb9, 0x27, 0xae, 0x43, 0x84, + 0xcb, 0xfc, 0xae, 0x20, 0xfd, 0x04, 0x6f, 0x9f, 0x42, 0x63, 0x53, 0x85, 0x78, 0x8a, 0xf7, 0x1f, + 0x73, 0x36, 0x0c, 0xd0, 0x5d, 0xa8, 0xb8, 0x7e, 0x28, 0x88, 0xef, 0x50, 0x43, 0x6b, 0x69, 0x6b, + 0xb5, 0x8d, 0xa5, 0xf5, 0x24, 0xe6, 0x7a, 0x6e, 0x89, 0x73, 0x13, 0xf4, 0x29, 0x00, 0x3d, 0x25, + 0xce, 0x90, 0x08, 0xd7, 0xef, 0x1b, 0xe5, 0xab, 0x1c, 0x0a, 0x46, 0x0f, 0xcb, 0x86, 0xd6, 0xfe, + 0xa3, 0x0c, 0xb5, 0x7d, 0xc6, 0xc5, 0x33, 0x12, 0x04, 0xae, 0xdf, 0x47, 0x5f, 0x40, 0xc3, 0x61, + 0xbe, 0x20, 0xae, 0x4f, 0x79, 0x37, 0x60, 0x5c, 0xa8, 0xd8, 0x0b, 0x36, 0x8a, 0x23, 0x6b, 0xe2, + 0x06, 0x2f, 0xe4, 0x67, 0xc9, 0x80, 0xee, 0x40, 0xf5, 0x39, 0x0b, 0x45, 0xe2, 0x55, 0x56, 0x5e, + 0x0b, 0x71, 0x64, 0xbd, 0x07, 0x71, 0x45, 0x6e, 0x95, 0xed, 0x11, 0x18, 0xef, 0xc9, 0x84, 0x17, + 0x76, 0x03, 0xce, 0x5e, 0x8e, 0x12, 0xd7, 0x19, 0xe5, 0x7a, 0x33, 0x8e, 0xac, 0x2b, 0x6d, 0xf0, + 0xf5, 0xfc, 0xe6, 0xd0, 0x0b, 0xf7, 0x25, 0xae, 0x68, 0xbf, 0x82, 0x65, 0x15, 0x6d, 0x82, 0xf1, + 0x9a, 0x62, 0x5c, 0x8d, 0x23, 0xeb, 0x43, 0xd7, 0x58, 0x97, 0x60, 0x91, 0xa7, 0xfd, 0xab, 0x06, + 0xf5, 0xbc, 0x66, 0x4f, 0xe8, 0x08, 0xdd, 0x87, 0x7a, 0xc0, 0x99, 0x43, 0xc3, 0xb0, 0xdb, 0x1f, + 0xba, 0x3d, 0x55, 0x94, 0xaa, 0xad, 0xc7, 0x91, 0x75, 0x09, 0xc7, 0xb5, 0xf4, 0xf4, 0x78, 0xe8, + 0xf6, 
0x90, 0x05, 0xb3, 0xae, 0xdf, 0xa3, 0x2f, 0x55, 0x31, 0x66, 0xed, 0x6a, 0x1c, 0x59, 0x09, + 0x80, 0x93, 0x05, 0xb5, 0x61, 0xae, 0xc7, 0x06, 0xc4, 0xf5, 0xd5, 0x37, 0x57, 0x6d, 0x88, 0x23, + 0x2b, 0x45, 0x70, 0xba, 0xb6, 0x05, 0xac, 0xe4, 0x99, 0xec, 0xa6, 0xcd, 0x96, 0x19, 0x7d, 0x06, + 0x0b, 0x59, 0xef, 0x8b, 0x29, 0x2d, 0xc5, 0x91, 0x75, 0xf9, 0x02, 0xd7, 0xb3, 0xa3, 0x4a, 0xea, + 0xff, 0x30, 0xef, 0x50, 0xcf, 0xeb, 0xba, 0x3d, 0x95, 0x56, 0xd5, 0xae, 0xc5, 0x91, 0x95, 0x41, + 0x78, 0x4e, 0x6e, 0x76, 0x7b, 0xed, 0x3f, 0x67, 0x40, 0xcf, 0xc3, 0x76, 0xa8, 0xd8, 0xf5, 0x4f, + 0x18, 0xfa, 0x08, 0xe6, 0x49, 0xaf, 0xc7, 0x69, 0x18, 0xa6, 0xc1, 0x94, 0x6b, 0x0a, 0xe1, 0x6c, + 0x83, 0x1e, 0xc0, 0xac, 0x2c, 0x6b, 0x68, 0x94, 0x5b, 0x33, 0x6b, 0xb5, 0x8d, 0xe5, 0x4c, 0x84, + 0x05, 0x99, 0x25, 0xb5, 0x50, 0x56, 0x38, 0x59, 0xd0, 0x6d, 0xd0, 0xf3, 0xb4, 0xb3, 0x28, 0xaa, + 0x2a, 0x78, 0x31, 0xc3, 0x37, 0xd3, 0x00, 0x03, 0x58, 0x0a, 0x38, 0x3d, 0xa1, 0x9c, 0xd3, 0x5e, + 0x6e, 0x2b, 0x7b, 0xdc, 0xd8, 0xb8, 0x3d, 0xa5, 0xf8, 0x34, 0xf9, 0xf5, 0xfd, 0xcc, 0x23, 0x65, + 0xb1, 0xaf, 0xc7, 0x91, 0x35, 0xcd, 0x83, 0xf5, 0x60, 0xc2, 0xb0, 0xfd, 0x9b, 0x06, 0xfa, 0xa4, + 0x37, 0x5a, 0x83, 0xf9, 0xa3, 0xce, 0x93, 0xce, 0xde, 0x37, 0x1d, 0xbd, 0xd4, 0xfc, 0xdf, 0xd9, + 0x79, 0x6b, 0x75, 0xd2, 0xe4, 0xc8, 0x7f, 0xe1, 0xb3, 0x9f, 0x7c, 0x74, 0x07, 0x2a, 0xbb, 0x9d, + 0x83, 0xc3, 0xcd, 0xce, 0xd6, 0xb6, 0xae, 0x35, 0x6f, 0x9e, 0x9d, 0xb7, 0x8c, 0x49, 0xd3, 0xac, + 0xaf, 0xa8, 0x0d, 0xd7, 0x76, 0xf6, 0x0e, 0x0e, 0xf5, 0x72, 0xd3, 0x38, 0x3b, 0x6f, 0xad, 0x4c, + 0xda, 0xed, 0xb0, 0x50, 0xb4, 0x6d, 0xb8, 0x51, 0x10, 0x84, 0xa0, 0xdc, 0x27, 0x1e, 0x66, 0x43, + 0x41, 0xd1, 0x1a, 0xa8, 0x07, 0xe6, 0x93, 0x01, 0x4d, 0x1b, 0x54, 0x8f, 0x23, 0x2b, 0xc7, 0x70, + 0xbe, 0x6b, 0xff, 0x5d, 0x81, 0x6a, 0x4e, 0x82, 0x76, 0xa0, 0xf1, 0x7e, 0xbc, 0x75, 0x5f, 0xd0, + 0x51, 0x3a, 0x6f, 0x56, 0xa6, 0x8a, 0xf9, 0x84, 0x8e, 0xec, 0xfa, 0xab, 0xc8, 0x2a, 0x5d, 0x44, + 0x96, 0x16, 0x47, 0x56, 0x09, 0xd7, 0x13, 0xcf, 0xa7, 0x3c, 0x90, 0xa2, 0x24, 0xb0, 0x5a, 0x60, + 0xca, 0xfb, 0x29, 0x29, 0x93, 0x89, 0x74, 0x73, 0x8a, 0xb2, 0xa0, 0xe9, 0x09, 0xea, 0x95, 0x9c, + 0xba, 0xa8, 0xfb, 0x23, 0x58, 0x2e, 0x84, 0xf0, 0xa9, 0xe8, 0xba, 0xfe, 0x09, 0x53, 0x52, 0xa9, + 0x6d, 0x18, 0x57, 0xb5, 0x7f, 0x82, 0x5a, 0xcf, 0xa9, 0x33, 0x6d, 0x7f, 0x02, 0x35, 0x87, 0x93, + 0xf0, 0x79, 0xd7, 0x61, 0x43, 0x3f, 0x99, 0x18, 0xb3, 0xf6, 0x62, 0x1c, 0x59, 0x45, 0x18, 0x83, + 0x3a, 0x6c, 0xc9, 0x3d, 0xba, 0x05, 0xf5, 0xe4, 0x8a, 0x53, 0x12, 0x32, 0xdf, 0x98, 0x55, 0x62, + 0x4d, 0xcc, 0xb1, 0x82, 0xe4, 0x00, 0x08, 0x05, 0x11, 0xd4, 0x98, 0x53, 0xdd, 0x50, 0xa2, 0x57, + 0x00, 0x4e, 0x16, 0xf4, 0x31, 0x2c, 0x06, 0x1e, 0x71, 0xe8, 0x80, 0xfa, 0xa2, 0x4b, 0x39, 0x67, + 0xdc, 0x98, 0x57, 0x34, 0x8d, 0x1c, 0xde, 0x96, 0xa8, 0x62, 0x72, 0xe5, 0x2f, 0x41, 0xa5, 0xa5, + 0xad, 0xcd, 0xa4, 0x4c, 0x12, 0xc0, 0xc9, 0x82, 0x7e, 0x00, 0x7d, 0xf2, 0x97, 0xc5, 0xa8, 0xaa, + 0x9a, 0xac, 0x66, 0x35, 0x79, 0x56, 0xb8, 0x3f, 0x24, 0x7d, 0xdb, 0x90, 0x25, 0x89, 0x23, 0x6b, + 0xca, 0x11, 0x2f, 0x0e, 0x2e, 0x9b, 0xa2, 0x47, 0x50, 0x09, 0x38, 0x0d, 0xa9, 0xcc, 0x00, 0xd4, + 0x43, 0x6b, 0x4e, 0x55, 0x5a, 0xbe, 0x30, 0x65, 0x91, 0xa8, 0x2e, 0xb3, 0xc7, 0xf9, 0x0e, 0x7d, + 0x0f, 0xcd, 0x4b, 0xea, 0x48, 0xb4, 0xdb, 0xe5, 0x52, 0xbc, 0xa1, 0x51, 0x53, 0xd3, 0xc2, 0xfc, + 0x80, 0x40, 0x0a, 0x1a, 0xc7, 0xab, 0x05, 0x51, 0x14, 0xf0, 0x10, 0xd9, 0x50, 0x1b, 0x50, 0xc1, + 0x5d, 0x47, 0x7e, 0x41, 0x68, 0xd4, 0x15, 0xdb, 0xad, 0xe9, 0x2c, 0x9f, 0x29, 0xa3, 0x43, 0xd2, + 0x0f, 0xb7, 0x7d, 0xc1, 0x47, 
0x18, 0x06, 0x39, 0x20, 0x9f, 0xaa, 0x4c, 0x86, 0x1c, 0x7b, 0xd4, + 0x58, 0x68, 0x69, 0x6b, 0x95, 0xe4, 0x53, 0x32, 0x6c, 0xa7, 0x84, 0xf3, 0x3d, 0xb2, 0x61, 0x89, + 0x9c, 0x12, 0xd7, 0x23, 0xc7, 0xae, 0xe7, 0x8a, 0x51, 0xf7, 0x67, 0xe6, 0x53, 0xa3, 0xa1, 0xfa, + 0xac, 0x26, 0xcb, 0xd4, 0x25, 0xd6, 0x8b, 0xd0, 0x77, 0xcc, 0xa7, 0xcd, 0x2f, 0x61, 0x71, 0x22, + 0x1d, 0xa4, 0xc3, 0x4c, 0xf6, 0x00, 0xab, 0x58, 0x6e, 0xd1, 0x0a, 0xcc, 0x9e, 0x12, 0x6f, 0x48, + 0x93, 0x71, 0x8d, 0x93, 0xc3, 0xc3, 0xf2, 0xe7, 0x5a, 0xfb, 0x47, 0xa8, 0x64, 0x35, 0x47, 0x4d, + 0xa8, 0xec, 0xe1, 0x47, 0xbb, 0x9d, 0x4d, 0xfc, 0xad, 0x5e, 0x6a, 0xd6, 0xcf, 0xce, 0x5b, 0x95, + 0x3d, 0xde, 0x73, 0x7d, 0xc2, 0x47, 0xc8, 0x04, 0xd8, 0xfe, 0x7a, 0x73, 0xeb, 0x68, 0xf3, 0x70, + 0xb7, 0xf3, 0x58, 0xd7, 0x9a, 0x8d, 0xb3, 0xf3, 0x16, 0x6c, 0xe7, 0xff, 0x03, 0x90, 0x01, 0xf3, + 0x07, 0x47, 0x07, 0xfb, 0xdb, 0x5b, 0x72, 0xf0, 0xd4, 0xce, 0xce, 0x5b, 0xf3, 0x07, 0xc3, 0x30, + 0xa0, 0x8e, 0xb0, 0x97, 0x61, 0x89, 0x05, 0x52, 0x04, 0x69, 0x9b, 0xe4, 0x97, 0xdb, 0x0f, 0x2e, + 0xde, 0x98, 0xda, 0xeb, 0x37, 0x66, 0xe9, 0xdd, 0x1b, 0x53, 0xfb, 0x65, 0x6c, 0x6a, 0x7f, 0x8d, + 0x4d, 0xed, 0xd5, 0xd8, 0xd4, 0x2e, 0xc6, 0xa6, 0xf6, 0xcf, 0xd8, 0xd4, 0xfe, 0x1d, 0x9b, 0xa5, + 0x77, 0x63, 0x53, 0xfb, 0xfd, 0xad, 0x59, 0xba, 0x78, 0x6b, 0x96, 0x5e, 0xbf, 0x35, 0x4b, 0xc7, + 0x73, 0xea, 0x7f, 0xce, 0xfd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xf8, 0x7a, 0x0a, 0x4a, + 0x09, 0x00, 0x00, +} + +func (x ActualLRPNetInfo_PreferredAddress) String() string { + s, ok := ActualLRPNetInfo_PreferredAddress_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ActualLRP_Presence) String() string { + s, ok := ActualLRP_Presence_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *ActualLRPGroup) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroup) + if !ok { + that2, ok := that.(ActualLRPGroup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Instance.Equal(that1.Instance) { + return false + } + if !this.Evacuating.Equal(that1.Evacuating) { + return false + } + return true +} +func (this *PortMapping) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PortMapping) + if !ok { + that2, ok := that.(PortMapping) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ContainerPort != that1.ContainerPort { + return false + } + if this.HostPort != that1.HostPort { + return false + } + if this.ContainerTlsProxyPort != that1.ContainerTlsProxyPort { + return false + } + if this.HostTlsProxyPort != that1.HostTlsProxyPort { + return false + } + return true +} +func (this *ActualLRPKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPKey) + if !ok { + that2, ok := that.(ActualLRPKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + if this.Domain != that1.Domain { + return false + } + return true +} +func (this *ActualLRPInstanceKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok 
:= that.(*ActualLRPInstanceKey) + if !ok { + that2, ok := that.(ActualLRPInstanceKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.InstanceGuid != that1.InstanceGuid { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *ActualLRPNetInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPNetInfo) + if !ok { + that2, ok := that.(ActualLRPNetInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Address != that1.Address { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if !this.Ports[i].Equal(that1.Ports[i]) { + return false + } + } + if this.InstanceAddress != that1.InstanceAddress { + return false + } + if this.PreferredAddress != that1.PreferredAddress { + return false + } + return true +} +func (this *ActualLRPInternalRoute) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInternalRoute) + if !ok { + that2, ok := that.(ActualLRPInternalRoute) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Hostname != that1.Hostname { + return false + } + return true +} +func (this *ActualLRP) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRP) + if !ok { + that2, ok := that.(ActualLRP) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPKey.Equal(&that1.ActualLRPKey) { + return false + } + if !this.ActualLRPInstanceKey.Equal(&that1.ActualLRPInstanceKey) { + return false + } + if !this.ActualLRPNetInfo.Equal(&that1.ActualLRPNetInfo) { + return false + } + if this.CrashCount != that1.CrashCount { + return false + } + if this.CrashReason != that1.CrashReason { + return false + } + if this.State != that1.State { + return false + } + if this.PlacementError != that1.PlacementError { + return false + } + if this.Since != that1.Since { + return false + } + if !this.ModificationTag.Equal(&that1.ModificationTag) { + return false + } + if this.Presence != that1.Presence { + return false + } + if len(this.ActualLrpInternalRoutes) != len(that1.ActualLrpInternalRoutes) { + return false + } + for i := range this.ActualLrpInternalRoutes { + if !this.ActualLrpInternalRoutes[i].Equal(that1.ActualLrpInternalRoutes[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if this.MetricTags[i] != that1.MetricTags[i] { + return false + } + } + if that1.OptionalRoutable == nil { + if this.OptionalRoutable != nil { + return false + } + } else if this.OptionalRoutable == nil { + return false + } else if !this.OptionalRoutable.Equal(that1.OptionalRoutable) { + return false + } + if this.AvailabilityZone != that1.AvailabilityZone { + return false + } + return true +} +func (this *ActualLRP_Routable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRP_Routable) + if !ok { + that2, ok := that.(ActualLRP_Routable) + if ok { + that1 = &that2 + } else { + return false + } + } + 
if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Routable != that1.Routable { + return false + } + return true +} +func (this *ActualLRPGroup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroup{") + if this.Instance != nil { + s = append(s, "Instance: "+fmt.Sprintf("%#v", this.Instance)+",\n") + } + if this.Evacuating != nil { + s = append(s, "Evacuating: "+fmt.Sprintf("%#v", this.Evacuating)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PortMapping) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.PortMapping{") + s = append(s, "ContainerPort: "+fmt.Sprintf("%#v", this.ContainerPort)+",\n") + s = append(s, "HostPort: "+fmt.Sprintf("%#v", this.HostPort)+",\n") + s = append(s, "ContainerTlsProxyPort: "+fmt.Sprintf("%#v", this.ContainerTlsProxyPort)+",\n") + s = append(s, "HostTlsProxyPort: "+fmt.Sprintf("%#v", this.HostTlsProxyPort)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPKey) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.ActualLRPKey{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInstanceKey) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPInstanceKey{") + s = append(s, "InstanceGuid: "+fmt.Sprintf("%#v", this.InstanceGuid)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPNetInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.ActualLRPNetInfo{") + s = append(s, "Address: "+fmt.Sprintf("%#v", this.Address)+",\n") + if this.Ports != nil { + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + } + s = append(s, "InstanceAddress: "+fmt.Sprintf("%#v", this.InstanceAddress)+",\n") + s = append(s, "PreferredAddress: "+fmt.Sprintf("%#v", this.PreferredAddress)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInternalRoute) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPInternalRoute{") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRP) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 18) + s = append(s, "&models.ActualLRP{") + s = append(s, "ActualLRPKey: "+strings.Replace(this.ActualLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPInstanceKey: "+strings.Replace(this.ActualLRPInstanceKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPNetInfo: "+strings.Replace(this.ActualLRPNetInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CrashCount: "+fmt.Sprintf("%#v", this.CrashCount)+",\n") + s = append(s, "CrashReason: "+fmt.Sprintf("%#v", this.CrashReason)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "PlacementError: "+fmt.Sprintf("%#v", this.PlacementError)+",\n") + s = append(s, "Since: "+fmt.Sprintf("%#v", 
this.Since)+",\n") + s = append(s, "ModificationTag: "+strings.Replace(this.ModificationTag.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Presence: "+fmt.Sprintf("%#v", this.Presence)+",\n") + if this.ActualLrpInternalRoutes != nil { + s = append(s, "ActualLrpInternalRoutes: "+fmt.Sprintf("%#v", this.ActualLrpInternalRoutes)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRP_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ActualLRP_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func valueToGoStringActualLrp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ActualLRPGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Evacuating != nil { + { + size, err := m.Evacuating.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Instance != nil { + { + size, err := m.Instance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PortMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HostTlsProxyPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.HostTlsProxyPort)) + i-- + dAtA[i] = 0x20 + } + if m.ContainerTlsProxyPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.ContainerTlsProxyPort)) + i-- + dAtA[i] = 0x18 + } + if m.HostPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + } + if m.ContainerPort != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m 
*ActualLRPKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInstanceKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceGuid) > 0 { + i -= len(m.InstanceGuid) + copy(dAtA[i:], m.InstanceGuid) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.InstanceGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPNetInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPNetInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPNetInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreferredAddress != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.PreferredAddress)) + i-- + dAtA[i] = 0x20 + } + if len(m.InstanceAddress) > 0 { + i -= len(m.InstanceAddress) + copy(dAtA[i:], m.InstanceAddress) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.InstanceAddress))) + i-- + dAtA[i] = 0x1a + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInternalRoute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInternalRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInternalRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x72 + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintActualLrp(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintActualLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintActualLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ActualLrpInternalRoutes) > 0 { + for iNdEx := len(m.ActualLrpInternalRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpInternalRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.Presence != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.Presence)) + i-- + dAtA[i] = 0x50 + } + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if m.Since != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x40 + } + if len(m.PlacementError) > 0 { + i -= len(m.PlacementError) + copy(dAtA[i:], m.PlacementError) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.PlacementError))) + i-- + dAtA[i] = 0x3a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.CrashReason) > 0 { + i -= len(m.CrashReason) + copy(dAtA[i:], m.CrashReason) + i = encodeVarintActualLrp(dAtA, i, uint64(len(m.CrashReason))) + i-- + dAtA[i] = 0x2a + } + if m.CrashCount != 0 { + i = encodeVarintActualLrp(dAtA, i, uint64(m.CrashCount)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.ActualLRPNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.ActualLRPInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ActualLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ActualLRP_Routable) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRP_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + return len(dAtA) - i, nil +} +func encodeVarintActualLrp(dAtA []byte, offset int, v uint64) int { + offset -= sovActualLrp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ActualLRPGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Instance != nil { + l = m.Instance.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.Evacuating != nil { + l = m.Evacuating.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *PortMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ContainerPort != 0 { + n += 1 + sovActualLrp(uint64(m.ContainerPort)) + } + if m.HostPort != 0 { + n += 1 + sovActualLrp(uint64(m.HostPort)) + } + if m.ContainerTlsProxyPort != 0 { + n += 1 + sovActualLrp(uint64(m.ContainerTlsProxyPort)) + } + if m.HostTlsProxyPort != 0 { + n += 1 + sovActualLrp(uint64(m.HostTlsProxyPort)) + } + return n +} + +func (m *ActualLRPKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrp(uint64(m.Index)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRPInstanceKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceGuid) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRPNetInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + } + l = len(m.InstanceAddress) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.PreferredAddress != 0 { + n += 1 + sovActualLrp(uint64(m.PreferredAddress)) + } + return n +} + +func (m *ActualLRPInternalRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPKey.Size() + n += 1 + l + sovActualLrp(uint64(l)) + l = m.ActualLRPInstanceKey.Size() + n += 1 + l + sovActualLrp(uint64(l)) + l = m.ActualLRPNetInfo.Size() + n += 1 + l + sovActualLrp(uint64(l)) + if m.CrashCount != 0 { + n += 1 + sovActualLrp(uint64(m.CrashCount)) + } + l = len(m.CrashReason) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + l = len(m.PlacementError) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + if m.Since != 0 { + n += 1 + sovActualLrp(uint64(m.Since)) + } + l = m.ModificationTag.Size() + n += 1 + l + sovActualLrp(uint64(l)) + if m.Presence != 0 { + n += 1 + sovActualLrp(uint64(m.Presence)) + } + if len(m.ActualLrpInternalRoutes) > 0 { + for _, e := range m.ActualLrpInternalRoutes { + l = e.Size() + n += 1 + l + sovActualLrp(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v 
:= range m.MetricTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovActualLrp(uint64(len(k))) + 1 + len(v) + sovActualLrp(uint64(len(v))) + n += mapEntrySize + 1 + sovActualLrp(uint64(mapEntrySize)) + } + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovActualLrp(uint64(l)) + } + return n +} + +func (m *ActualLRP_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func sovActualLrp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozActualLrp(x uint64) (n int) { + return sovActualLrp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ActualLRPGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroup{`, + `Instance:` + strings.Replace(this.Instance.String(), "ActualLRP", "ActualLRP", 1) + `,`, + `Evacuating:` + strings.Replace(this.Evacuating.String(), "ActualLRP", "ActualLRP", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortMapping{`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerTlsProxyPort:` + fmt.Sprintf("%v", this.ContainerTlsProxyPort) + `,`, + `HostTlsProxyPort:` + fmt.Sprintf("%v", this.HostTlsProxyPort) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPKey{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceKey{`, + `InstanceGuid:` + fmt.Sprintf("%v", this.InstanceGuid) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPNetInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]*PortMapping{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(f.String(), "PortMapping", "PortMapping", 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&ActualLRPNetInfo{`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `InstanceAddress:` + fmt.Sprintf("%v", this.InstanceAddress) + `,`, + `PreferredAddress:` + fmt.Sprintf("%v", this.PreferredAddress) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInternalRoute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInternalRoute{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRP) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpInternalRoutes := "[]*ActualLRPInternalRoute{" + for _, f := range this.ActualLrpInternalRoutes { + repeatedStringForActualLrpInternalRoutes += strings.Replace(f.String(), "ActualLRPInternalRoute", "ActualLRPInternalRoute", 1) + "," + } + repeatedStringForActualLrpInternalRoutes += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&ActualLRP{`, + `ActualLRPKey:` + strings.Replace(strings.Replace(this.ActualLRPKey.String(), "ActualLRPKey", "ActualLRPKey", 1), `&`, ``, 1) + `,`, + `ActualLRPInstanceKey:` + strings.Replace(strings.Replace(this.ActualLRPInstanceKey.String(), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1), `&`, ``, 1) + `,`, + `ActualLRPNetInfo:` + strings.Replace(strings.Replace(this.ActualLRPNetInfo.String(), "ActualLRPNetInfo", "ActualLRPNetInfo", 1), `&`, ``, 1) + `,`, + `CrashCount:` + fmt.Sprintf("%v", this.CrashCount) + `,`, + `CrashReason:` + fmt.Sprintf("%v", this.CrashReason) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `PlacementError:` + fmt.Sprintf("%v", this.PlacementError) + `,`, + `Since:` + fmt.Sprintf("%v", this.Since) + `,`, + `ModificationTag:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1), `&`, ``, 1) + `,`, + `Presence:` + fmt.Sprintf("%v", this.Presence) + `,`, + `ActualLrpInternalRoutes:` + repeatedStringForActualLrpInternalRoutes + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRP_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRP_Routable{`, + `Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func valueToStringActualLrp(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ActualLRPGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Instance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Instance == nil { + m.Instance = &ActualLRP{} + } + if err := m.Instance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evacuating", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Evacuating == nil { + m.Evacuating = &ActualLRP{} + } + if err := m.Evacuating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTlsProxyPort", wireType) + } + m.ContainerTlsProxyPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerTlsProxyPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostTlsProxyPort", wireType) + } + m.HostTlsProxyPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostTlsProxyPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
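Editor's note (illustrative only, not part of the vendored file): every generated Unmarshal method in this file repeats the same hand-rolled pattern -- read a base-128 varint, split it into a field number and wire type, then decode the payload accordingly. The standalone Go sketch below shows just that decoding step; decodeVarint is a hypothetical helper written for this note, not a BBS API.

// Illustrative sketch only; decodeVarint is a hypothetical helper, not part of the BBS code.
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint from data, returning its value and the
// number of bytes consumed, mirroring the shift-loop used by the generated code.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	for i := 0; i < len(data) && i < 10; i++ {
		b := data[i]
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("truncated or overlong varint")
}

func main() {
	// Tag byte 0x0a encodes field 1 with wire type 2 (length-delimited), the tag
	// the generated marshaller writes for ActualLRPKey.process_guid above.
	tag, n, err := decodeVarint([]byte{0x0a})
	if err != nil {
		panic(err)
	}
	fmt.Printf("field=%d wireType=%d bytes=%d\n", tag>>3, tag&0x7, n)
}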
+func (m *ActualLRPKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field InstanceGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstanceGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPNetInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPNetInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPNetInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortMapping{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstanceAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredAddress", wireType) + } + m.PreferredAddress = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreferredAddress |= ActualLRPNetInfo_PreferredAddress(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInternalRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInternalRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInternalRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashCount", wireType) + } + m.CrashCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CrashCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrashReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Presence", wireType) + } + m.Presence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Presence |= ActualLRP_Presence(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInternalRoutes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 
{ + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpInternalRoutes = append(m.ActualLrpInternalRoutes, &ActualLRPInternalRoute{}) + if err := m.ActualLrpInternalRoutes[len(m.ActualLrpInternalRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthActualLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthActualLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthActualLrp + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthActualLrp + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = 
&ActualLRP_Routable{b} + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipActualLrp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthActualLrp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupActualLrp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthActualLrp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthActualLrp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowActualLrp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupActualLrp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto new file mode 100644 index 00000000..aa089c96 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "modification_tag.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message ActualLRPGroup { + option deprecated = true; + ActualLRP instance = 1; + ActualLRP evacuating = 2; +} + +message PortMapping { + uint32 container_port = 1 [(gogoproto.jsontag) = "container_port"]; + uint32 host_port = 2 [(gogoproto.jsontag) = "host_port"]; + uint32 container_tls_proxy_port = 3 [(gogoproto.jsontag) = "container_tls_proxy_port"]; + uint32 host_tls_proxy_port = 4 
[(gogoproto.jsontag) = "host_tls_proxy_port"]; +} + +message ActualLRPKey { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = "index"]; + string domain = 3 [(gogoproto.jsontag) = "domain"]; +} + +message ActualLRPInstanceKey { + string instance_guid = 1 [(gogoproto.jsontag) = "instance_guid"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message ActualLRPNetInfo { + string address = 1 [(gogoproto.jsontag) = "address"]; + repeated PortMapping ports = 2 [(gogoproto.jsontag) = "ports"]; + string instance_address = 3; + + enum PreferredAddress { + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "PreferredAddressUnknown"]; + INSTANCE = 1 [(gogoproto.enumvalue_customname) = "PreferredAddressInstance"]; + HOST = 2 [(gogoproto.enumvalue_customname) = "PreferredAddressHost"]; + } + + PreferredAddress preferred_address = 4 [(gogoproto.jsontag) = "preferred_address"]; +} + +message ActualLRPInternalRoute { + string hostname = 1 [(gogoproto.jsontag) = "hostname"]; +} + +message ActualLRP { + enum Presence { + ORDINARY = 0 [(gogoproto.enumvalue_customname) = "Ordinary"]; + EVACUATING = 1 [(gogoproto.enumvalue_customname) = "Evacuating"]; + SUSPECT = 2 [(gogoproto.enumvalue_customname) = "Suspect"]; + } + + ActualLRPKey actual_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInstanceKey actual_lrp_instance_key = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPNetInfo actual_lrp_net_info = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + int32 crash_count = 4 [(gogoproto.jsontag) = "crash_count"]; + string crash_reason = 5; + string state = 6 [(gogoproto.jsontag) = "state"]; + string placement_error = 7; + int64 since = 8 [(gogoproto.jsontag) = "since"]; + ModificationTag modification_tag = 9 [(gogoproto.nullable) = false,(gogoproto.jsontag) = "modification_tag"]; + Presence presence = 10 [(gogoproto.jsontag) = "presence"]; + repeated ActualLRPInternalRoute actual_lrp_internal_routes = 11; + map metric_tags = 12; + oneof optional_routable { + bool routable = 13 [(gogoproto.jsontag) = "routable"]; + } + string availability_zone = 14 [(gogoproto.jsontag) = "availability_zone"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go new file mode 100644 index 00000000..96268193 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.go @@ -0,0 +1,346 @@ +package models + +import "encoding/json" + +func (request *ActualLRPsRequest) Validate() error { + return nil +} + +func (request *ActualLRPsRequest) SetIndex(index int32) { + request.OptionalIndex = &ActualLRPsRequest_Index{Index: index} +} + +func (request ActualLRPsRequest) IndexExists() bool { + _, ok := request.GetOptionalIndex().(*ActualLRPsRequest_Index) + return ok +} + +type internalActualLRPsRequest struct { + Domain string `json:"domain"` + CellId string `json:"cell_id"` + ProcessGuid string `json:"process_guid"` + Index *int32 `json:"index,omitempty"` +} + +func (request *ActualLRPsRequest) UnmarshalJSON(data []byte) error { + var internalRequest internalActualLRPsRequest + if err := json.Unmarshal(data, &internalRequest); err != nil { + return err + } + + request.Domain = internalRequest.Domain + request.CellId = internalRequest.CellId + request.ProcessGuid = internalRequest.ProcessGuid + if internalRequest.Index != 
nil { + request.SetIndex(*internalRequest.Index) + } + + return nil +} + +func (request ActualLRPsRequest) MarshalJSON() ([]byte, error) { + internalRequest := internalActualLRPsRequest{ + Domain: request.Domain, + CellId: request.CellId, + ProcessGuid: request.ProcessGuid, + } + + if request.IndexExists() { + i := request.GetIndex() + internalRequest.Index = &i + } + return json.Marshal(internalRequest) +} + +// Deprecated: use the ActualLRPInstances API instead +func (request *ActualLRPGroupsRequest) Validate() error { + return nil +} + +// Deprecated: use the ActualLRPInstances API instead +func (request *ActualLRPGroupsByProcessGuidRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +// Deprecated: use the ActualLRPInstances API instead +func (request *ActualLRPGroupByProcessGuidAndIndexRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.Index < 0 { + validationError = validationError.Append(ErrInvalidField{"index"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RemoveActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.Index < 0 { + validationError = validationError.Append(ErrInvalidField{"index"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *ClaimActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *StartActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpNetInfo == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_net_info"}) + } else if err := request.ActualLrpNetInfo.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *StartActualLRPRequest) SetRoutable(routable bool) { + request.OptionalRoutable = &StartActualLRPRequest_Routable{ + Routable: routable, + } +} + +func (request *StartActualLRPRequest) RoutableExists() bool { + _, ok := request.GetOptionalRoutable().(*StartActualLRPRequest_Routable) + return ok +} + 
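Editor's note (illustrative only, not part of the vendored file): the hand-written helpers in this file wrap gogo oneof members (OptionalIndex on ActualLRPsRequest, OptionalRoutable on StartActualLRPRequest) so that "unset" stays distinguishable from the zero value, and the custom JSON (un)marshalling above omits an unset index instead of emitting 0. Below is a minimal sketch of that round trip; it assumes the ActualLRPsRequest struct and its GetIndex accessor generated in actual_lrp_requests.pb.go elsewhere in this patch, and the process guid is a placeholder.

// Illustrative sketch only; assumes the generated ActualLRPsRequest from
// actual_lrp_requests.pb.go (added elsewhere in this patch); the guid is a placeholder.
package main

import (
	"encoding/json"
	"fmt"

	"code.cloudfoundry.org/bbs/models"
)

func main() {
	req := models.ActualLRPsRequest{ProcessGuid: "example-process-guid"}
	req.SetIndex(2) // stores the index in the OptionalIndex oneof wrapper

	data, err := json.Marshal(&req) // goes through MarshalJSON above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "index":2 appears only because SetIndex was called

	var decoded models.ActualLRPsRequest
	if err := json.Unmarshal(data, &decoded); err != nil { // goes through UnmarshalJSON above
		panic(err)
	}
	fmt.Println(decoded.IndexExists(), decoded.GetIndex()) // true 2
}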
+func (request *CrashActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *FailActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ErrorMessage == "" { + validationError = validationError.Append(ErrInvalidField{"error_message"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RetireActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RemoveEvacuatingActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateClaimedActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateCrashedActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) 
+ } + + if request.ErrorMessage == "" { + validationError = validationError.Append(ErrInvalidField{"error_message"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateStoppedActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *EvacuateRunningActualLRPRequest) Validate() error { + var validationError ValidationError + + if request.ActualLrpKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_key"}) + } else if err := request.ActualLrpKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpInstanceKey == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_instance_key"}) + } else if err := request.ActualLrpInstanceKey.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if request.ActualLrpNetInfo == nil { + validationError = validationError.Append(ErrInvalidField{"actual_lrp_net_info"}) + } else if err := request.ActualLrpNetInfo.Validate(); err != nil { + validationError = validationError.Append(err) + } + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go new file mode 100644 index 00000000..f34f6715 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.pb.go @@ -0,0 +1,4872 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: actual_lrp_requests.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ActualLRPLifecycleResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *ActualLRPLifecycleResponse) Reset() { *m = ActualLRPLifecycleResponse{} } +func (*ActualLRPLifecycleResponse) ProtoMessage() {} +func (*ActualLRPLifecycleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{0} +} +func (m *ActualLRPLifecycleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPLifecycleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPLifecycleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPLifecycleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPLifecycleResponse.Merge(m, src) +} +func (m *ActualLRPLifecycleResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPLifecycleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPLifecycleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPLifecycleResponse proto.InternalMessageInfo + +func (m *ActualLRPLifecycleResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +// Deprecated: Do not use. +type ActualLRPGroupsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ActualLrpGroups []*ActualLRPGroup `protobuf:"bytes,2,rep,name=actual_lrp_groups,json=actualLrpGroups,proto3" json:"actual_lrp_groups,omitempty"` +} + +func (m *ActualLRPGroupsResponse) Reset() { *m = ActualLRPGroupsResponse{} } +func (*ActualLRPGroupsResponse) ProtoMessage() {} +func (*ActualLRPGroupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{1} +} +func (m *ActualLRPGroupsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupsResponse.Merge(m, src) +} +func (m *ActualLRPGroupsResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupsResponse proto.InternalMessageInfo + +func (m *ActualLRPGroupsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *ActualLRPGroupsResponse) GetActualLrpGroups() []*ActualLRPGroup { + if m != nil { + return m.ActualLrpGroups + } + return nil +} + +// Deprecated: Do not use. 
+type ActualLRPGroupResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ActualLrpGroup *ActualLRPGroup `protobuf:"bytes,2,opt,name=actual_lrp_group,json=actualLrpGroup,proto3" json:"actual_lrp_group,omitempty"` +} + +func (m *ActualLRPGroupResponse) Reset() { *m = ActualLRPGroupResponse{} } +func (*ActualLRPGroupResponse) ProtoMessage() {} +func (*ActualLRPGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{2} +} +func (m *ActualLRPGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupResponse.Merge(m, src) +} +func (m *ActualLRPGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupResponse proto.InternalMessageInfo + +func (m *ActualLRPGroupResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *ActualLRPGroupResponse) GetActualLrpGroup() *ActualLRPGroup { + if m != nil { + return m.ActualLrpGroup + } + return nil +} + +// Deprecated: Do not use. +type ActualLRPGroupsRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *ActualLRPGroupsRequest) Reset() { *m = ActualLRPGroupsRequest{} } +func (*ActualLRPGroupsRequest) ProtoMessage() {} +func (*ActualLRPGroupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{3} +} +func (m *ActualLRPGroupsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupsRequest.Merge(m, src) +} +func (m *ActualLRPGroupsRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupsRequest proto.InternalMessageInfo + +func (m *ActualLRPGroupsRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *ActualLRPGroupsRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +// Deprecated: Do not use. 
+type ActualLRPGroupsByProcessGuidRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` +} + +func (m *ActualLRPGroupsByProcessGuidRequest) Reset() { *m = ActualLRPGroupsByProcessGuidRequest{} } +func (*ActualLRPGroupsByProcessGuidRequest) ProtoMessage() {} +func (*ActualLRPGroupsByProcessGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{4} +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest.Merge(m, src) +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupsByProcessGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupsByProcessGuidRequest proto.InternalMessageInfo + +func (m *ActualLRPGroupsByProcessGuidRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +// Deprecated: Do not use. +type ActualLRPGroupByProcessGuidAndIndexRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Reset() { + *m = ActualLRPGroupByProcessGuidAndIndexRequest{} +} +func (*ActualLRPGroupByProcessGuidAndIndexRequest) ProtoMessage() {} +func (*ActualLRPGroupByProcessGuidAndIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{5} +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest.Merge(m, src) +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPGroupByProcessGuidAndIndexRequest proto.InternalMessageInfo + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +type ClaimActualLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 
`protobuf:"varint,2,opt,name=index,proto3" json:"index"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,3,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *ClaimActualLRPRequest) Reset() { *m = ClaimActualLRPRequest{} } +func (*ClaimActualLRPRequest) ProtoMessage() {} +func (*ClaimActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{6} +} +func (m *ClaimActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClaimActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClaimActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClaimActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClaimActualLRPRequest.Merge(m, src) +} +func (m *ClaimActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *ClaimActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClaimActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClaimActualLRPRequest proto.InternalMessageInfo + +func (m *ClaimActualLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ClaimActualLRPRequest) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ClaimActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type StartActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ActualLrpNetInfo *ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3" json:"actual_lrp_net_info,omitempty"` + ActualLrpInternalRoutes []*ActualLRPInternalRoute `protobuf:"bytes,4,rep,name=actual_lrp_internal_routes,json=actualLrpInternalRoutes,proto3" json:"actual_lrp_internal_routes,omitempty"` + MetricTags map[string]string `protobuf:"bytes,5,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to OptionalRoutable: + // *StartActualLRPRequest_Routable + OptionalRoutable isStartActualLRPRequest_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,7,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *StartActualLRPRequest) Reset() { *m = StartActualLRPRequest{} } +func (*StartActualLRPRequest) ProtoMessage() {} +func (*StartActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{7} +} +func (m *StartActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*StartActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartActualLRPRequest.Merge(m, src) +} +func (m *StartActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *StartActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartActualLRPRequest proto.InternalMessageInfo + +type isStartActualLRPRequest_OptionalRoutable interface { + isStartActualLRPRequest_OptionalRoutable() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type StartActualLRPRequest_Routable struct { + Routable bool `protobuf:"varint,6,opt,name=Routable,proto3,oneof" json:"Routable,omitempty"` +} + +func (*StartActualLRPRequest_Routable) isStartActualLRPRequest_OptionalRoutable() {} + +func (m *StartActualLRPRequest) GetOptionalRoutable() isStartActualLRPRequest_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpNetInfo() *ActualLRPNetInfo { + if m != nil { + return m.ActualLrpNetInfo + } + return nil +} + +func (m *StartActualLRPRequest) GetActualLrpInternalRoutes() []*ActualLRPInternalRoute { + if m != nil { + return m.ActualLrpInternalRoutes + } + return nil +} + +func (m *StartActualLRPRequest) GetMetricTags() map[string]string { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *StartActualLRPRequest) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*StartActualLRPRequest_Routable); ok { + return x.Routable + } + return false +} + +func (m *StartActualLRPRequest) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*StartActualLRPRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*StartActualLRPRequest_Routable)(nil), + } +} + +type CrashActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message"` +} + +func (m *CrashActualLRPRequest) Reset() { *m = CrashActualLRPRequest{} } +func (*CrashActualLRPRequest) ProtoMessage() {} +func (*CrashActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{8} +} +func (m *CrashActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrashActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CrashActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CrashActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrashActualLRPRequest.Merge(m, src) +} +func (m *CrashActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *CrashActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CrashActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CrashActualLRPRequest proto.InternalMessageInfo + +func (m *CrashActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *CrashActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *CrashActualLRPRequest) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +type FailActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message"` +} + +func (m *FailActualLRPRequest) Reset() { *m = FailActualLRPRequest{} } +func (*FailActualLRPRequest) ProtoMessage() {} +func (*FailActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{9} +} +func (m *FailActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FailActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FailActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FailActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FailActualLRPRequest.Merge(m, src) +} +func (m *FailActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *FailActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FailActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FailActualLRPRequest proto.InternalMessageInfo + +func (m *FailActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *FailActualLRPRequest) GetErrorMessage() string { + if m != nil { + 
return m.ErrorMessage + } + return "" +} + +type RetireActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` +} + +func (m *RetireActualLRPRequest) Reset() { *m = RetireActualLRPRequest{} } +func (*RetireActualLRPRequest) ProtoMessage() {} +func (*RetireActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{10} +} +func (m *RetireActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetireActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RetireActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RetireActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetireActualLRPRequest.Merge(m, src) +} +func (m *RetireActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RetireActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RetireActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RetireActualLRPRequest proto.InternalMessageInfo + +func (m *RetireActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +type RemoveActualLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,3,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *RemoveActualLRPRequest) Reset() { *m = RemoveActualLRPRequest{} } +func (*RemoveActualLRPRequest) ProtoMessage() {} +func (*RemoveActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{11} +} +func (m *RemoveActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveActualLRPRequest.Merge(m, src) +} +func (m *RemoveActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveActualLRPRequest proto.InternalMessageInfo + +func (m *RemoveActualLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *RemoveActualLRPRequest) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *RemoveActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type ActualLRPsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ActualLrps []*ActualLRP `protobuf:"bytes,2,rep,name=actual_lrps,json=actualLrps,proto3" json:"actual_lrps,omitempty"` +} + +func (m *ActualLRPsResponse) Reset() { *m = ActualLRPsResponse{} } +func 
(*ActualLRPsResponse) ProtoMessage() {} +func (*ActualLRPsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{12} +} +func (m *ActualLRPsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPsResponse.Merge(m, src) +} +func (m *ActualLRPsResponse) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPsResponse proto.InternalMessageInfo + +func (m *ActualLRPsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *ActualLRPsResponse) GetActualLrps() []*ActualLRP { + if m != nil { + return m.ActualLrps + } + return nil +} + +type ActualLRPsRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + ProcessGuid string `protobuf:"bytes,3,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + // Types that are valid to be assigned to OptionalIndex: + // *ActualLRPsRequest_Index + OptionalIndex isActualLRPsRequest_OptionalIndex `protobuf_oneof:"optional_index"` +} + +func (m *ActualLRPsRequest) Reset() { *m = ActualLRPsRequest{} } +func (*ActualLRPsRequest) ProtoMessage() {} +func (*ActualLRPsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a7753fd8557db809, []int{13} +} +func (m *ActualLRPsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPsRequest.Merge(m, src) +} +func (m *ActualLRPsRequest) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPsRequest proto.InternalMessageInfo + +type isActualLRPsRequest_OptionalIndex interface { + isActualLRPsRequest_OptionalIndex() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ActualLRPsRequest_Index struct { + Index int32 `protobuf:"varint,4,opt,name=index,proto3,oneof" json:"index"` +} + +func (*ActualLRPsRequest_Index) isActualLRPsRequest_OptionalIndex() {} + +func (m *ActualLRPsRequest) GetOptionalIndex() isActualLRPsRequest_OptionalIndex { + if m != nil { + return m.OptionalIndex + } + return nil +} + +func (m *ActualLRPsRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *ActualLRPsRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *ActualLRPsRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *ActualLRPsRequest) GetIndex() int32 { + if x, ok := 
m.GetOptionalIndex().(*ActualLRPsRequest_Index); ok { + return x.Index + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ActualLRPsRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ActualLRPsRequest_Index)(nil), + } +} + +func init() { + proto.RegisterType((*ActualLRPLifecycleResponse)(nil), "models.ActualLRPLifecycleResponse") + proto.RegisterType((*ActualLRPGroupsResponse)(nil), "models.ActualLRPGroupsResponse") + proto.RegisterType((*ActualLRPGroupResponse)(nil), "models.ActualLRPGroupResponse") + proto.RegisterType((*ActualLRPGroupsRequest)(nil), "models.ActualLRPGroupsRequest") + proto.RegisterType((*ActualLRPGroupsByProcessGuidRequest)(nil), "models.ActualLRPGroupsByProcessGuidRequest") + proto.RegisterType((*ActualLRPGroupByProcessGuidAndIndexRequest)(nil), "models.ActualLRPGroupByProcessGuidAndIndexRequest") + proto.RegisterType((*ClaimActualLRPRequest)(nil), "models.ClaimActualLRPRequest") + proto.RegisterType((*StartActualLRPRequest)(nil), "models.StartActualLRPRequest") + proto.RegisterMapType((map[string]string)(nil), "models.StartActualLRPRequest.MetricTagsEntry") + proto.RegisterType((*CrashActualLRPRequest)(nil), "models.CrashActualLRPRequest") + proto.RegisterType((*FailActualLRPRequest)(nil), "models.FailActualLRPRequest") + proto.RegisterType((*RetireActualLRPRequest)(nil), "models.RetireActualLRPRequest") + proto.RegisterType((*RemoveActualLRPRequest)(nil), "models.RemoveActualLRPRequest") + proto.RegisterType((*ActualLRPsResponse)(nil), "models.ActualLRPsResponse") + proto.RegisterType((*ActualLRPsRequest)(nil), "models.ActualLRPsRequest") +} + +func init() { proto.RegisterFile("actual_lrp_requests.proto", fileDescriptor_a7753fd8557db809) } + +var fileDescriptor_a7753fd8557db809 = []byte{ + // 851 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0x38, 0xb1, 0x5b, 0x3f, 0xa7, 0xa9, 0xbd, 0xcd, 0x8f, 0xc5, 0xaa, 0xd6, 0x61, 0xcb, + 0x21, 0x42, 0xaa, 0x2b, 0xa5, 0x08, 0xa1, 0x48, 0x48, 0xc4, 0xa8, 0xa4, 0x56, 0xd3, 0xaa, 0x9a, + 0xf6, 0x04, 0x12, 0xab, 0xb1, 0x3d, 0x76, 0x47, 0xec, 0xee, 0x98, 0x99, 0xd9, 0x08, 0x73, 0x42, + 0x42, 0xea, 0x81, 0x13, 0x7f, 0x06, 0x7f, 0x07, 0x1c, 0xe0, 0x98, 0x03, 0x87, 0x9e, 0xac, 0xc6, + 0xb9, 0x20, 0x9f, 0xfa, 0x27, 0xa0, 0x9d, 0xf1, 0x6e, 0xd6, 0xde, 0x82, 0x1a, 0x08, 0x12, 0x3d, + 0xed, 0xbc, 0x6f, 0xde, 0x7c, 0xdf, 0x37, 0x6f, 0xdf, 0xcc, 0x2e, 0xbc, 0x43, 0x7a, 0x2a, 0x22, + 0xbe, 0xe7, 0x8b, 0x91, 0x27, 0xe8, 0xd7, 0x11, 0x95, 0x4a, 0xb6, 0x46, 0x82, 0x2b, 0x6e, 0x95, + 0x03, 0xde, 0xa7, 0xbe, 0x6c, 0xdc, 0x1e, 0x32, 0xf5, 0x2c, 0xea, 0xb6, 0x7a, 0x3c, 0xb8, 0x33, + 0xe4, 0x43, 0x7e, 0x47, 0x4f, 0x77, 0xa3, 0x81, 0x8e, 0x74, 0xa0, 0x47, 0x66, 0x59, 0xa3, 0x76, + 0xce, 0x38, 0x47, 0xaa, 0x54, 0x08, 0x2e, 0x4c, 0xe0, 0x1e, 0x40, 0xe3, 0x40, 0x27, 0x1c, 0xe1, + 0xc7, 0x47, 0x6c, 0x40, 0x7b, 0xe3, 0x9e, 0x4f, 0x31, 0x95, 0x23, 0x1e, 0x4a, 0x6a, 0xdd, 0x82, + 0x92, 0x4e, 0xb6, 0xd1, 0x0e, 0xda, 0xad, 0xee, 0x5d, 0x6b, 0x19, 0x0f, 0xad, 0x7b, 0x31, 0x88, + 0xcd, 0x9c, 0xfb, 0x1c, 0xc1, 0x76, 0xca, 0x71, 0x28, 0x78, 0x34, 0x92, 0x17, 0x22, 0xb0, 0xda, + 0x50, 0xcf, 0x6c, 0x7b, 0xa8, 0x19, 0xec, 0xe2, 0xce, 0xca, 0x6e, 0x75, 0x6f, 0x2b, 0x59, 0xb0, + 0x28, 0x80, 0xaf, 0x9b, 0x05, 0x47, 0x62, 0x64, 0x04, 0xf7, 0x8b, 0x36, 0x72, 0xbf, 0x47, 0xb0, + 0xb5, 0x94, 0x77, 0x21, 0x1f, 0x9f, 0x40, 0x6d, 0xd9, 0x87, 0x5d, 0xd4, 0xf9, 0x7f, 0x65, 0x63, + 0x7d, 0xd1, 0x86, 0x76, 0x31, 
0x58, 0x36, 0x21, 0xb1, 0x79, 0x91, 0x96, 0x0b, 0xe5, 0x3e, 0x0f, + 0x08, 0x0b, 0xb5, 0x8b, 0x4a, 0x1b, 0x66, 0x93, 0xe6, 0x1c, 0xc1, 0xf3, 0xa7, 0xf5, 0x1e, 0x5c, + 0xe9, 0x51, 0xdf, 0xf7, 0x58, 0x5f, 0x4b, 0x57, 0xda, 0xd5, 0xd9, 0xa4, 0x99, 0x40, 0xb8, 0x1c, + 0x0f, 0x3a, 0x7d, 0xad, 0xf3, 0x25, 0xdc, 0x5a, 0xd2, 0x69, 0x8f, 0x1f, 0x0b, 0xde, 0xa3, 0x52, + 0x1e, 0x46, 0xac, 0x9f, 0x88, 0xde, 0x85, 0xb5, 0x91, 0x41, 0xbd, 0x61, 0xc4, 0xfa, 0x73, 0xe9, + 0xda, 0x6c, 0xd2, 0x5c, 0xc0, 0x71, 0x75, 0x74, 0xbe, 0x56, 0xf3, 0x3f, 0x47, 0xf0, 0xfe, 0xa2, + 0xc0, 0x02, 0xff, 0x41, 0xd8, 0xef, 0x84, 0x7d, 0xfa, 0xcd, 0xbf, 0xd1, 0xb1, 0x9a, 0x50, 0x62, + 0x31, 0x89, 0xde, 0x6b, 0xa9, 0x5d, 0x99, 0x4d, 0x9a, 0x06, 0xc0, 0xe6, 0xa1, 0x8d, 0xfc, 0x8c, + 0x60, 0xf3, 0x53, 0x9f, 0xb0, 0x20, 0x75, 0xf3, 0x9f, 0x6a, 0x5a, 0x4f, 0x60, 0x3b, 0xd3, 0x06, + 0x2c, 0x94, 0x8a, 0x84, 0x3d, 0xea, 0x7d, 0x45, 0xc7, 0xf6, 0x8a, 0xee, 0x86, 0x9b, 0xb9, 0x6e, + 0xe8, 0xcc, 0x93, 0x1e, 0xd0, 0x31, 0xde, 0x48, 0x7b, 0x22, 0x83, 0xba, 0xbf, 0xaf, 0xc2, 0xe6, + 0x13, 0x45, 0x84, 0xca, 0x6d, 0x62, 0x1f, 0xd6, 0x33, 0x72, 0xb1, 0x8a, 0xe9, 0xd1, 0x8d, 0x9c, + 0x4a, 0xcc, 0xbe, 0x96, 0xb2, 0x3f, 0xa0, 0xe3, 0xbf, 0xb3, 0x5a, 0xfc, 0xa7, 0x56, 0xad, 0x43, + 0xb8, 0x91, 0x21, 0x0d, 0xa9, 0xf2, 0x58, 0x38, 0xe0, 0xf3, 0xbd, 0xdb, 0x39, 0xc2, 0x47, 0x54, + 0x75, 0xc2, 0x01, 0xc7, 0xb5, 0x94, 0x6c, 0x8e, 0x58, 0x5f, 0x40, 0x63, 0xc1, 0x9d, 0xa2, 0x22, + 0x24, 0xbe, 0x27, 0x78, 0xa4, 0xa8, 0xb4, 0x57, 0xf5, 0x01, 0x77, 0x5e, 0x63, 0xd0, 0xe4, 0xe1, + 0x38, 0x0d, 0x6f, 0x67, 0x2c, 0x66, 0x70, 0x69, 0x3d, 0x82, 0x6a, 0x40, 0x95, 0x60, 0x3d, 0x4f, + 0x91, 0xa1, 0xb4, 0x4b, 0x9a, 0xed, 0x76, 0xc2, 0xf6, 0xda, 0x52, 0xb7, 0x1e, 0xea, 0x05, 0x4f, + 0xc9, 0x50, 0xde, 0x0b, 0x95, 0x18, 0x63, 0x08, 0x52, 0xc0, 0xba, 0x09, 0x57, 0x63, 0x66, 0xd2, + 0xf5, 0xa9, 0x5d, 0xde, 0x41, 0xbb, 0x57, 0xef, 0x17, 0x70, 0x8a, 0xe8, 0x2b, 0xea, 0x98, 0x30, + 0x9f, 0x74, 0x99, 0xcf, 0xd4, 0xd8, 0xfb, 0x96, 0x87, 0xd4, 0xbe, 0xa2, 0xdb, 0x6d, 0x73, 0x36, + 0x69, 0xe6, 0x27, 0x71, 0x2d, 0x0b, 0x7d, 0xce, 0x43, 0xda, 0xf8, 0x18, 0xae, 0x2f, 0x19, 0xb0, + 0x6a, 0xb0, 0x92, 0xbc, 0xf0, 0x0a, 0x8e, 0x87, 0xd6, 0x06, 0x94, 0x8e, 0x89, 0x1f, 0x51, 0x73, + 0xfa, 0xb1, 0x09, 0xf6, 0x8b, 0x1f, 0xa1, 0xf6, 0x0d, 0xa8, 0xf3, 0x91, 0x62, 0x3c, 0x29, 0x61, + 0xec, 0xcb, 0x7d, 0x19, 0x9f, 0x0d, 0x41, 0xe4, 0xb3, 0xff, 0x7f, 0x5b, 0x7d, 0x08, 0xd7, 0xf4, + 0x35, 0xeb, 0x05, 0x54, 0x4a, 0x32, 0xa4, 0xba, 0xa1, 0x2a, 0xed, 0xfa, 0x6c, 0xd2, 0x5c, 0x9c, + 0xc0, 0x6b, 0x3a, 0x7c, 0x68, 0x22, 0xf7, 0x07, 0x04, 0x1b, 0x9f, 0x11, 0xe6, 0x5f, 0xea, 0x0e, + 0x73, 0x66, 0x8a, 0x6f, 0x66, 0xe6, 0x29, 0x6c, 0x61, 0xaa, 0x98, 0xa0, 0x97, 0xe9, 0xc6, 0xfd, + 0x05, 0xc5, 0xb4, 0x01, 0x3f, 0xa6, 0x6f, 0xf3, 0x15, 0x17, 0x80, 0x95, 0x66, 0x5f, 0xf0, 0x0f, + 0x60, 0x0f, 0xaa, 0xe7, 0x7e, 0x92, 0x6f, 0x7f, 0x3d, 0xe7, 0x01, 0x43, 0x2a, 0x2c, 0xdd, 0x5f, + 0x11, 0xd4, 0xb3, 0x7a, 0x97, 0xfc, 0x8d, 0xcd, 0x55, 0x7e, 0xe5, 0x4d, 0x2a, 0xff, 0x6e, 0x52, + 0xf9, 0xd5, 0xa5, 0xca, 0xdf, 0x2f, 0xcc, 0x6b, 0xdf, 0xae, 0xc1, 0x7a, 0x7a, 0x8e, 0x0d, 0xf2, + 0xc1, 0xc9, 0xa9, 0x53, 0x78, 0x71, 0xea, 0x14, 0x5e, 0x9d, 0x3a, 0xe8, 0xbb, 0xa9, 0x83, 0x7e, + 0x9a, 0x3a, 0xe8, 0xb7, 0xa9, 0x83, 0x4e, 0xa6, 0x0e, 0x7a, 0x39, 0x75, 0xd0, 0x1f, 0x53, 0xa7, + 0xf0, 0x6a, 0xea, 0xa0, 0x1f, 0xcf, 0x9c, 0xc2, 0xc9, 0x99, 0x53, 0x78, 0x71, 0xe6, 0x14, 0xba, + 0x65, 0xfd, 0x03, 0x77, 0xf7, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xee, 0x85, 0x3a, 0xee, 0x33, + 0x0a, 0x00, 0x00, +} + +func (this 
*ActualLRPLifecycleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPLifecycleResponse) + if !ok { + that2, ok := that.(ActualLRPLifecycleResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *ActualLRPGroupsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupsResponse) + if !ok { + that2, ok := that.(ActualLRPGroupsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.ActualLrpGroups) != len(that1.ActualLrpGroups) { + return false + } + for i := range this.ActualLrpGroups { + if !this.ActualLrpGroups[i].Equal(that1.ActualLrpGroups[i]) { + return false + } + } + return true +} +func (this *ActualLRPGroupResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupResponse) + if !ok { + that2, ok := that.(ActualLRPGroupResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.ActualLrpGroup.Equal(that1.ActualLrpGroup) { + return false + } + return true +} +func (this *ActualLRPGroupsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupsRequest) + if !ok { + that2, ok := that.(ActualLRPGroupsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *ActualLRPGroupsByProcessGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupsByProcessGuidRequest) + if !ok { + that2, ok := that.(ActualLRPGroupsByProcessGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + return true +} +func (this *ActualLRPGroupByProcessGuidAndIndexRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPGroupByProcessGuidAndIndexRequest) + if !ok { + that2, ok := that.(ActualLRPGroupByProcessGuidAndIndexRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *ClaimActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClaimActualLRPRequest) + if !ok { + that2, ok := that.(ClaimActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if 
this.Index != that1.Index { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + return true +} +func (this *StartActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartActualLRPRequest) + if !ok { + that2, ok := that.(StartActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + if !this.ActualLrpNetInfo.Equal(that1.ActualLrpNetInfo) { + return false + } + if len(this.ActualLrpInternalRoutes) != len(that1.ActualLrpInternalRoutes) { + return false + } + for i := range this.ActualLrpInternalRoutes { + if !this.ActualLrpInternalRoutes[i].Equal(that1.ActualLrpInternalRoutes[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if this.MetricTags[i] != that1.MetricTags[i] { + return false + } + } + if that1.OptionalRoutable == nil { + if this.OptionalRoutable != nil { + return false + } + } else if this.OptionalRoutable == nil { + return false + } else if !this.OptionalRoutable.Equal(that1.OptionalRoutable) { + return false + } + if this.AvailabilityZone != that1.AvailabilityZone { + return false + } + return true +} +func (this *StartActualLRPRequest_Routable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartActualLRPRequest_Routable) + if !ok { + that2, ok := that.(StartActualLRPRequest_Routable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Routable != that1.Routable { + return false + } + return true +} +func (this *CrashActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CrashActualLRPRequest) + if !ok { + that2, ok := that.(CrashActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + if this.ErrorMessage != that1.ErrorMessage { + return false + } + return true +} +func (this *FailActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FailActualLRPRequest) + if !ok { + that2, ok := that.(FailActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + if this.ErrorMessage != that1.ErrorMessage { + return false + } + return true +} +func (this *RetireActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RetireActualLRPRequest) + if !ok { + that2, ok := that.(RetireActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpKey.Equal(that1.ActualLrpKey) { + return false + } + return true +} +func (this 
*RemoveActualLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RemoveActualLRPRequest) + if !ok { + that2, ok := that.(RemoveActualLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Index != that1.Index { + return false + } + if !this.ActualLrpInstanceKey.Equal(that1.ActualLrpInstanceKey) { + return false + } + return true +} +func (this *ActualLRPsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPsResponse) + if !ok { + that2, ok := that.(ActualLRPsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.ActualLrps) != len(that1.ActualLrps) { + return false + } + for i := range this.ActualLrps { + if !this.ActualLrps[i].Equal(that1.ActualLrps[i]) { + return false + } + } + return true +} +func (this *ActualLRPsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPsRequest) + if !ok { + that2, ok := that.(ActualLRPsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if that1.OptionalIndex == nil { + if this.OptionalIndex != nil { + return false + } + } else if this.OptionalIndex == nil { + return false + } else if !this.OptionalIndex.Equal(that1.OptionalIndex) { + return false + } + return true +} +func (this *ActualLRPsRequest_Index) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPsRequest_Index) + if !ok { + that2, ok := that.(ActualLRPsRequest_Index) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *ActualLRPLifecycleResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPLifecycleResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.ActualLrpGroups != nil { + s = append(s, "ActualLrpGroups: "+fmt.Sprintf("%#v", this.ActualLrpGroups)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.ActualLrpGroup != nil { + s = append(s, "ActualLrpGroup: "+fmt.Sprintf("%#v", this.ActualLrpGroup)+",\n") + } + s = append(s, "}") 
+ return strings.Join(s, "") +} +func (this *ActualLRPGroupsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupsRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupsByProcessGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPGroupsByProcessGuidRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPGroupByProcessGuidAndIndexRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPGroupByProcessGuidAndIndexRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ClaimActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.ClaimActualLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.StartActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + if this.ActualLrpNetInfo != nil { + s = append(s, "ActualLrpNetInfo: "+fmt.Sprintf("%#v", this.ActualLrpNetInfo)+",\n") + } + if this.ActualLrpInternalRoutes != nil { + s = append(s, "ActualLrpInternalRoutes: "+fmt.Sprintf("%#v", this.ActualLrpInternalRoutes)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartActualLRPRequest_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.StartActualLRPRequest_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func (this *CrashActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.CrashActualLRPRequest{") + if 
this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FailActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.FailActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RetireActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.RetireActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.RemoveActualLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.ActualLrps != nil { + s = append(s, "ActualLrps: "+fmt.Sprintf("%#v", this.ActualLrps)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.ActualLRPsRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + if this.OptionalIndex != nil { + s = append(s, "OptionalIndex: "+fmt.Sprintf("%#v", this.OptionalIndex)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPsRequest_Index) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ActualLRPsRequest_Index{` + + `Index:` + fmt.Sprintf("%#v", this.Index) + `}`}, ", ") + return s +} +func valueToGoStringActualLrpRequests(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ActualLRPLifecycleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPLifecycleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPLifecycleResponse) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ActualLrpGroups) > 0 { + for iNdEx := len(m.ActualLrpGroups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpGroups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpGroup != nil { + { + size, err := m.ActualLrpGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupsByProcessGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupsByProcessGuidRequest) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupsByProcessGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClaimActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClaimActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClaimActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x3a + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + 
copy(dAtA[i:], k) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintActualLrpRequests(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ActualLrpInternalRoutes) > 0 { + for iNdEx := len(m.ActualLrpInternalRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpInternalRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.ActualLrpNetInfo != nil { + { + size, err := m.ActualLrpNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartActualLRPRequest_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartActualLRPRequest_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *CrashActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrashActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrashActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FailActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FailActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FailActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + 
dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetireActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetireActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetireActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ActualLrps) > 0 { + for iNdEx := len(m.ActualLrps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintActualLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *ActualLRPsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OptionalIndex != nil { + { + size := m.OptionalIndex.Size() + i -= size + if _, err := m.OptionalIndex.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPsRequest_Index) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPsRequest_Index) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintActualLrpRequests(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func encodeVarintActualLrpRequests(dAtA []byte, offset int, v uint64) int { + offset -= sovActualLrpRequests(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ActualLRPLifecycleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if len(m.ActualLrpGroups) > 0 { + for _, e := range m.ActualLrpGroups { + l = e.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + } + return n +} + +func (m *ActualLRPGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpGroup != nil { + l = m.ActualLrpGroup.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupsByProcessGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrpRequests(uint64(m.Index)) + } + return n +} + +func (m *ClaimActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrpRequests(uint64(m.Index)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + 
return n +} + +func (m *StartActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpNetInfo != nil { + l = m.ActualLrpNetInfo.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if len(m.ActualLrpInternalRoutes) > 0 { + for _, e := range m.ActualLrpInternalRoutes { + l = e.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovActualLrpRequests(uint64(len(k))) + 1 + len(v) + sovActualLrpRequests(uint64(len(v))) + n += mapEntrySize + 1 + sovActualLrpRequests(uint64(mapEntrySize)) + } + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *StartActualLRPRequest_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *CrashActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *FailActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *RetireActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *RemoveActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovActualLrpRequests(uint64(m.Index)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + return n +} + +func (m *ActualLRPsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if len(m.ActualLrps) > 0 { + for _, e := range m.ActualLrps { + l = e.Size() + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + } + return n +} + +func (m *ActualLRPsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovActualLrpRequests(uint64(l)) + } + if m.OptionalIndex != nil { + n += m.OptionalIndex.Size() + } + return n +} + +func (m *ActualLRPsRequest_Index) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovActualLrpRequests(uint64(m.Index)) + return n +} + +func 
sovActualLrpRequests(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozActualLrpRequests(x uint64) (n int) { + return sovActualLrpRequests(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ActualLRPLifecycleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPLifecycleResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpGroups := "[]*ActualLRPGroup{" + for _, f := range this.ActualLrpGroups { + repeatedStringForActualLrpGroups += strings.Replace(fmt.Sprintf("%v", f), "ActualLRPGroup", "ActualLRPGroup", 1) + "," + } + repeatedStringForActualLrpGroups += "}" + s := strings.Join([]string{`&ActualLRPGroupsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ActualLrpGroups:` + repeatedStringForActualLrpGroups + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ActualLrpGroup:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpGroup), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupsRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupsByProcessGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupsByProcessGuidRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPGroupByProcessGuidAndIndexRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPGroupByProcessGuidAndIndexRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *ClaimActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClaimActualLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StartActualLRPRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpInternalRoutes := "[]*ActualLRPInternalRoute{" + for _, f := range this.ActualLrpInternalRoutes { + repeatedStringForActualLrpInternalRoutes += strings.Replace(fmt.Sprintf("%v", f), "ActualLRPInternalRoute", "ActualLRPInternalRoute", 1) + "," + } + repeatedStringForActualLrpInternalRoutes += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += 
fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&StartActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ActualLrpNetInfo:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpNetInfo), "ActualLRPNetInfo", "ActualLRPNetInfo", 1) + `,`, + `ActualLrpInternalRoutes:` + repeatedStringForActualLrpInternalRoutes + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *StartActualLRPRequest_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartActualLRPRequest_Routable{`, + `Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func (this *CrashActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrashActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} +func (this *FailActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FailActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} +func (this *RetireActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetireActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveActualLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrps := "[]*ActualLRP{" + for _, f := range this.ActualLrps { + repeatedStringForActualLrps += strings.Replace(fmt.Sprintf("%v", f), "ActualLRP", "ActualLRP", 1) + "," + } + repeatedStringForActualLrps += "}" + s := strings.Join([]string{`&ActualLRPsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ActualLrps:` + repeatedStringForActualLrps + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPsRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + 
`OptionalIndex:` + fmt.Sprintf("%v", this.OptionalIndex) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPsRequest_Index) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPsRequest_Index{`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func valueToStringActualLrpRequests(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ActualLRPLifecycleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPLifecycleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPLifecycleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpGroups = append(m.ActualLrpGroups, &ActualLRPGroup{}) + if err := m.ActualLrpGroups[len(m.ActualLrpGroups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpGroup == nil { + 
m.ActualLrpGroup = &ActualLRPGroup{} + } + if err := m.ActualLrpGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupsByProcessGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ActualLRPGroupsByProcessGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupsByProcessGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPGroupByProcessGuidAndIndexRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPGroupByProcessGuidAndIndexRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPGroupByProcessGuidAndIndexRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *ClaimActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClaimActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClaimActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartActualLRPRequest: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpNetInfo == nil { + m.ActualLrpNetInfo = &ActualLRPNetInfo{} + } + if err := m.ActualLrpNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInternalRoutes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpInternalRoutes = append(m.ActualLrpInternalRoutes, &ActualLRPInternalRoute{}) + if err := m.ActualLrpInternalRoutes[len(m.ActualLrpInternalRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthActualLrpRequests + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthActualLrpRequests + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = &StartActualLRPRequest_Routable{b} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CrashActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrashActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrashActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FailActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FailActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FailActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetireActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetireActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetireActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrps = append(m.ActualLrps, &ActualLRP{}) + if err := m.ActualLrps[len(m.ActualLrps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *ActualLRPsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthActualLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthActualLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalIndex = &ActualLRPsRequest_Index{v} + default: + iNdEx = preIndex + skippy, err := skipActualLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthActualLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipActualLrpRequests(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowActualLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthActualLrpRequests + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupActualLrpRequests + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthActualLrpRequests + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthActualLrpRequests = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowActualLrpRequests = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupActualLrpRequests = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto new file mode 100644 index 00000000..178d6127 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/actual_lrp_requests.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actual_lrp.proto"; +import "error.proto"; + +message ActualLRPLifecycleResponse { + Error error = 1; +} + +message ActualLRPGroupsResponse { + option deprecated = true; + Error error = 1; + repeated ActualLRPGroup actual_lrp_groups = 2; +} + +message ActualLRPGroupResponse { + option deprecated = true; + Error error = 1; + ActualLRPGroup actual_lrp_group = 2; +} + +message ActualLRPGroupsRequest { + option deprecated = true; + string domain = 1 [(gogoproto.jsontag) = "domain"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message ActualLRPGroupsByProcessGuidRequest { + option deprecated = true; + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; +} + +message ActualLRPGroupByProcessGuidAndIndexRequest { + option deprecated = true; + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = "index"]; +} + +message ClaimActualLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = "index"]; + ActualLRPInstanceKey actual_lrp_instance_key = 3; +} + +message StartActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + ActualLRPNetInfo actual_lrp_net_info = 3; + repeated ActualLRPInternalRoute actual_lrp_internal_routes = 4; + map<string, string> metric_tags
= 5; + oneof optional_routable { + bool Routable = 6; + } + string availability_zone = 7 [(gogoproto.jsontag)= "availability_zone"]; +} + +message CrashActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + string error_message = 3 [(gogoproto.jsontag) = "error_message"]; +} + +message FailActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + string error_message = 2 [(gogoproto.jsontag) = "error_message"]; +} + +message RetireActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; +} + +message RemoveActualLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + int32 index = 2 [(gogoproto.jsontag) = "index"]; + ActualLRPInstanceKey actual_lrp_instance_key = 3; +} + +message ActualLRPsResponse { + Error error = 1; + repeated ActualLRP actual_lrps = 2; +} + +message ActualLRPsRequest { + string domain = 1 [(gogoproto.jsontag) = "domain"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; + string process_guid = 3 [(gogoproto.jsontag) = "process_guid"]; + oneof optional_index { + int32 index = 4 [(gogoproto.jsontag) = "index"]; + } +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go b/vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go new file mode 100644 index 00000000..6fbb6cc2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/bbs_presence.go @@ -0,0 +1,38 @@ +package models + +import "net/url" + +type BBSPresence struct { + ID string `json:"id"` + URL string `json:"url"` +} + +func NewBBSPresence(id, url string) BBSPresence { + return BBSPresence{ + ID: id, + URL: url, + } +} + +func (p BBSPresence) Validate() error { + var validationError ValidationError + + if p.ID == "" { + validationError = validationError.Append(ErrInvalidField{Field: "id"}) + } + + if p.URL == "" { + validationError = validationError.Append(ErrInvalidField{Field: "url"}) + } + + url, err := url.Parse(p.URL) + if err != nil || !url.IsAbs() { + validationError = validationError.Append(ErrInvalidField{Field: "url"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go new file mode 100644 index 00000000..dca87b11 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.go @@ -0,0 +1,59 @@ +package models + +import ( + "strings" + + "code.cloudfoundry.org/bbs/format" +) + +func (c *CachedDependency) Validate() error { + var validationError ValidationError + + if c.GetFrom() == "" { + validationError = validationError.Append(ErrInvalidField{"from"}) + } + + if c.GetTo() == "" { + validationError = validationError.Append(ErrInvalidField{"to"}) + } + + if c.GetChecksumValue() != "" && c.GetChecksumAlgorithm() == "" { + validationError = validationError.Append(ErrInvalidField{"checksum algorithm"}) + } + + if c.GetChecksumValue() == "" && c.GetChecksumAlgorithm() != "" { + validationError = validationError.Append(ErrInvalidField{"checksum value"}) + } + + if c.GetChecksumValue() != "" && c.GetChecksumAlgorithm() != "" { + if !contains([]string{"md5", "sha1", "sha256"}, strings.ToLower(c.GetChecksumAlgorithm())) { + validationError = validationError.Append(ErrInvalidField{"invalid algorithm"}) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func validateCachedDependencies(cachedDependencies []*CachedDependency) ValidationError { + var validationError ValidationError + + if 
len(cachedDependencies) > 0 { + for _, cacheDep := range cachedDependencies { + err := cacheDep.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"cached_dependency"}) + validationError = validationError.Append(err) + } + } + } + + return validationError +} + +func (c *CachedDependency) Version() format.Version { + return format.V0 +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go new file mode 100644 index 00000000..2bd79438 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cached_dependency.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CachedDependency struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + From string `protobuf:"bytes,2,opt,name=from,proto3" json:"from"` + To string `protobuf:"bytes,3,opt,name=to,proto3" json:"to"` + CacheKey string `protobuf:"bytes,4,opt,name=cache_key,json=cacheKey,proto3" json:"cache_key"` + LogSource string `protobuf:"bytes,5,opt,name=log_source,json=logSource,proto3" json:"log_source"` + ChecksumAlgorithm string `protobuf:"bytes,6,opt,name=checksum_algorithm,json=checksumAlgorithm,proto3" json:"checksum_algorithm,omitempty"` + ChecksumValue string `protobuf:"bytes,7,opt,name=checksum_value,json=checksumValue,proto3" json:"checksum_value,omitempty"` +} + +func (m *CachedDependency) Reset() { *m = CachedDependency{} } +func (*CachedDependency) ProtoMessage() {} +func (*CachedDependency) Descriptor() ([]byte, []int) { + return fileDescriptor_936e0e6e1c3697fa, []int{0} +} +func (m *CachedDependency) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CachedDependency) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CachedDependency.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CachedDependency) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachedDependency.Merge(m, src) +} +func (m *CachedDependency) XXX_Size() int { + return m.Size() +} +func (m *CachedDependency) XXX_DiscardUnknown() { + xxx_messageInfo_CachedDependency.DiscardUnknown(m) +} + +var xxx_messageInfo_CachedDependency proto.InternalMessageInfo + +func (m *CachedDependency) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CachedDependency) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *CachedDependency) GetTo() string { + if m != nil { + return m.To + } + return "" +} + +func (m *CachedDependency) GetCacheKey() string { + if m != nil { + return m.CacheKey + } + return "" +} + +func (m *CachedDependency) GetLogSource() 
string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *CachedDependency) GetChecksumAlgorithm() string { + if m != nil { + return m.ChecksumAlgorithm + } + return "" +} + +func (m *CachedDependency) GetChecksumValue() string { + if m != nil { + return m.ChecksumValue + } + return "" +} + +func init() { + proto.RegisterType((*CachedDependency)(nil), "models.CachedDependency") +} + +func init() { proto.RegisterFile("cached_dependency.proto", fileDescriptor_936e0e6e1c3697fa) } + +var fileDescriptor_936e0e6e1c3697fa = []byte{ + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0x33, 0xb9, 0xbd, 0xb9, 0xed, 0x40, 0xcb, 0xbd, 0xb3, 0xb8, 0x0e, 0x22, 0x27, 0x22, + 0x08, 0x22, 0xb4, 0x5d, 0xe8, 0x0b, 0x58, 0xdd, 0xb9, 0x8b, 0xe0, 0x36, 0xa4, 0x93, 0x69, 0x52, + 0x9a, 0xf4, 0x94, 0x34, 0x11, 0xba, 0xf3, 0x11, 0xdc, 0xf9, 0x0a, 0x3e, 0x8a, 0xcb, 0x2e, 0xbb, + 0x0a, 0x76, 0xba, 0x91, 0xac, 0xfa, 0x08, 0x92, 0x23, 0x6d, 0xdd, 0x1c, 0xfe, 0xff, 0xff, 0xfe, + 0x73, 0x18, 0x86, 0x1f, 0xa9, 0x40, 0xc5, 0x3a, 0xf4, 0x43, 0x3d, 0xd3, 0xd3, 0x50, 0x4f, 0xd5, + 0xa2, 0x37, 0xcb, 0x30, 0x47, 0xe1, 0xa4, 0x18, 0xea, 0x64, 0x7e, 0xdc, 0x8d, 0xc6, 0x79, 0x5c, + 0x0c, 0x7b, 0x0a, 0xd3, 0x7e, 0x84, 0x11, 0xf6, 0x09, 0x0f, 0x8b, 0x11, 0x39, 0x32, 0xa4, 0xbe, + 0xd7, 0xce, 0x5e, 0x6d, 0xfe, 0xf7, 0x96, 0x4e, 0xde, 0xed, 0x2f, 0x8a, 0x13, 0xde, 0x98, 0x06, + 0xa9, 0x96, 0xec, 0x94, 0x5d, 0xb4, 0x06, 0xcd, 0xaa, 0x74, 0xc9, 0x7b, 0x34, 0x6b, 0x3a, 0xca, + 0x30, 0x95, 0xf6, 0x81, 0xd6, 0xde, 0xa3, 0x29, 0xfe, 0x73, 0x3b, 0x47, 0xf9, 0x8b, 0x98, 0x53, + 0x95, 0xae, 0x9d, 0xa3, 0x67, 0xe7, 0x28, 0x2e, 0x79, 0x8b, 0x9e, 0xee, 0x4f, 0xf4, 0x42, 0x36, + 0x08, 0xb7, 0xab, 0xd2, 0x3d, 0x84, 0x5e, 0x93, 0xe4, 0xbd, 0x5e, 0x88, 0x2e, 0xe7, 0x09, 0x46, + 0xfe, 0x1c, 0x8b, 0x4c, 0x69, 0xf9, 0x9b, 0xca, 0x9d, 0xaa, 0x74, 0x7f, 0xa4, 0x5e, 0x2b, 0xc1, + 0xe8, 0x81, 0xa4, 0xe8, 0x72, 0xa1, 0x62, 0xad, 0x26, 0xf3, 0x22, 0xf5, 0x83, 0x24, 0xc2, 0x6c, + 0x9c, 0xc7, 0xa9, 0x74, 0xea, 0x35, 0xef, 0xdf, 0x8e, 0xdc, 0xec, 0x80, 0x38, 0xe7, 0x9d, 0x7d, + 0xfd, 0x29, 0x48, 0x0a, 0x2d, 0xff, 0x50, 0xb5, 0xbd, 0x4b, 0x1f, 0xeb, 0x70, 0x70, 0xbd, 0x5c, + 0x03, 0x5b, 0xad, 0xc1, 0xda, 0xae, 0x81, 0x3d, 0x1b, 0x60, 0x6f, 0x06, 0xd8, 0xbb, 0x01, 0xb6, + 0x34, 0xc0, 0x3e, 0x0c, 0xb0, 0x4f, 0x03, 0xd6, 0xd6, 0x00, 0x7b, 0xd9, 0x80, 0xb5, 0xdc, 0x80, + 0xb5, 0xda, 0x80, 0x35, 0x74, 0xe8, 0x5b, 0xaf, 0xbe, 0x02, 0x00, 0x00, 0xff, 0xff, 0x10, 0xce, + 0x51, 0x13, 0xa8, 0x01, 0x00, 0x00, +} + +func (this *CachedDependency) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CachedDependency) + if !ok { + that2, ok := that.(CachedDependency) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.From != that1.From { + return false + } + if this.To != that1.To { + return false + } + if this.CacheKey != that1.CacheKey { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.ChecksumAlgorithm != that1.ChecksumAlgorithm { + return false + } + if this.ChecksumValue != that1.ChecksumValue { + return false + } + return true +} +func (this *CachedDependency) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.CachedDependency{") + s = append(s, 
"Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "CacheKey: "+fmt.Sprintf("%#v", this.CacheKey)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "ChecksumAlgorithm: "+fmt.Sprintf("%#v", this.ChecksumAlgorithm)+",\n") + s = append(s, "ChecksumValue: "+fmt.Sprintf("%#v", this.ChecksumValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCachedDependency(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CachedDependency) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CachedDependency) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachedDependency) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChecksumValue) > 0 { + i -= len(m.ChecksumValue) + copy(dAtA[i:], m.ChecksumValue) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.ChecksumValue))) + i-- + dAtA[i] = 0x3a + } + if len(m.ChecksumAlgorithm) > 0 { + i -= len(m.ChecksumAlgorithm) + copy(dAtA[i:], m.ChecksumAlgorithm) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.ChecksumAlgorithm))) + i-- + dAtA[i] = 0x32 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x2a + } + if len(m.CacheKey) > 0 { + i -= len(m.CacheKey) + copy(dAtA[i:], m.CacheKey) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.CacheKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.To) > 0 { + i -= len(m.To) + copy(dAtA[i:], m.To) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.To))) + i-- + dAtA[i] = 0x1a + } + if len(m.From) > 0 { + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCachedDependency(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCachedDependency(dAtA []byte, offset int, v uint64) int { + offset -= sovCachedDependency(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CachedDependency) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.From) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.To) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.CacheKey) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.ChecksumAlgorithm) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + l = len(m.ChecksumValue) + if l > 0 { + n += 1 + l + sovCachedDependency(uint64(l)) + } + return n +} + +func sovCachedDependency(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozCachedDependency(x uint64) (n int) { + return sovCachedDependency(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CachedDependency) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CachedDependency{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `CacheKey:` + fmt.Sprintf("%v", this.CacheKey) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `ChecksumAlgorithm:` + fmt.Sprintf("%v", this.ChecksumAlgorithm) + `,`, + `ChecksumValue:` + fmt.Sprintf("%v", this.ChecksumValue) + `,`, + `}`, + }, "") + return s +} +func valueToStringCachedDependency(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CachedDependency) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CachedDependency: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CachedDependency: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CacheKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumAlgorithm", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumAlgorithm = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChecksumValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCachedDependency + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCachedDependency + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChecksumValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCachedDependency(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCachedDependency + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCachedDependency(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCachedDependency + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCachedDependency + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCachedDependency + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCachedDependency + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCachedDependency = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCachedDependency = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCachedDependency = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto new file mode 100644 index 00000000..daea97e2 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cached_dependency.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message CachedDependency { + string name = 1 [(gogoproto.jsontag) = "name"]; + string from = 2 [(gogoproto.jsontag) = "from"]; + string to = 3 [(gogoproto.jsontag) = "to"]; + string cache_key = 4 [(gogoproto.jsontag) = "cache_key"]; + string log_source = 5 [(gogoproto.jsontag) = "log_source"]; + string checksum_algorithm = 6; + string checksum_value = 7; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cell_presence.go b/vendor/code.cloudfoundry.org/bbs/models/cell_presence.go new file mode 100644 index 00000000..9c940670 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cell_presence.go @@ -0,0 +1,153 @@ +package models + +import "strings" + +type CellSet map[string]*CellPresence + +func NewCellSet() CellSet { + return make(CellSet) +} + +func NewCellSetFromList(cells []*CellPresence) CellSet { + cellSet := NewCellSet() + for _, v := range cells { + cellSet.Add(v) + } + return cellSet +} + +func (set CellSet) Add(cell *CellPresence) { + set[cell.CellId] = cell +} + +func (set CellSet) Each(predicate func(cell *CellPresence)) { + for _, cell := range set { + predicate(cell) + } +} + +func (set CellSet) HasCellID(cellID string) bool { + _, ok := set[cellID] + return ok +} + +func (set CellSet) CellIDs() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + return keys +} + +func NewCellCapacity(memoryMB, diskMB, containers int32) CellCapacity { + return CellCapacity{ + MemoryMb: memoryMB, + DiskMb: diskMB, + Containers: containers, + } +} + 
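For orientation, here is a minimal usage sketch of the CellSet and CellCapacity helpers added above; it is illustrative only (the cell IDs and capacity figures are invented) and relies solely on the constructors shown above plus the Validate method defined just below in cell_presence.go.

package main

import (
	"fmt"

	"code.cloudfoundry.org/bbs/models"
)

func main() {
	// Build a CellSet from a slice of presences, then query it by cell ID.
	set := models.NewCellSetFromList([]*models.CellPresence{
		{CellId: "cell-a", Zone: "z1"},
		{CellId: "cell-b", Zone: "z2"},
	})
	fmt.Println(set.HasCellID("cell-a")) // true
	fmt.Println(set.CellIDs())           // [cell-a cell-b], in map-iteration order

	// NewCellCapacity only fills in the struct; Validate (defined below)
	// rejects non-positive memory or container counts and negative disk.
	capacity := models.NewCellCapacity(4096, 16384, 256)
	fmt.Println(capacity.Validate()) // <nil>
}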
+func (cap CellCapacity) Validate() error { + var validationError ValidationError + + if cap.MemoryMb <= 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if cap.DiskMb < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if cap.Containers <= 0 { + validationError = validationError.Append(ErrInvalidField{"containers"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func NewCellPresence( + cellID, repAddress, repUrl, zone string, + capacity CellCapacity, + rootFSProviders, preloadedRootFSes, placementTags, optionalPlacementTags []string, +) CellPresence { + var providers []*Provider + var pProviders []string + pProviders = append(pProviders, preloadedRootFSes...) + providers = append(providers, &Provider{PreloadedRootFSScheme, pProviders}) + providers = append(providers, &Provider{PreloadedOCIRootFSScheme, pProviders}) + + for _, prov := range rootFSProviders { + providers = append(providers, &Provider{prov, []string{}}) + } + + return CellPresence{ + CellId: cellID, + RepAddress: repAddress, + RepUrl: repUrl, + Zone: zone, + Capacity: &capacity, + RootfsProviders: providers, + PlacementTags: placementTags, + OptionalPlacementTags: optionalPlacementTags, + } +} + +func (c CellPresence) Validate() error { + var validationError ValidationError + + if c.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if c.RepAddress == "" { + validationError = validationError.Append(ErrInvalidField{"rep_address"}) + } + + if c.RepUrl != "" && !strings.HasPrefix(c.RepUrl, "http://") && !strings.HasPrefix(c.RepUrl, "https://") { + validationError = validationError.Append(ErrInvalidField{"rep_url"}) + } + + if err := c.Capacity.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +const ( + EventTypeCellDisappeared = "cell_disappeared" +) + +type CellEvent interface { + EventType() string + CellIDs() []string +} + +type CellDisappearedEvent struct { + IDs []string +} + +func NewCellDisappearedEvent(ids []string) CellDisappearedEvent { + return CellDisappearedEvent{ids} +} + +func (CellDisappearedEvent) EventType() string { + return EventTypeCellDisappeared +} + +func (e CellDisappearedEvent) CellIDs() []string { + return e.IDs +} + +func (c *CellPresence) Copy() *CellPresence { + newCellPresense := *c + return &newCellPresense +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/cells.pb.go b/vendor/code.cloudfoundry.org/bbs/models/cells.pb.go new file mode 100644 index 00000000..36d9d7b7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cells.pb.go @@ -0,0 +1,1703 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cells.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CellCapacity struct { + MemoryMb int32 `protobuf:"varint,1,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + DiskMb int32 `protobuf:"varint,2,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + Containers int32 `protobuf:"varint,3,opt,name=containers,proto3" json:"containers"` +} + +func (m *CellCapacity) Reset() { *m = CellCapacity{} } +func (*CellCapacity) ProtoMessage() {} +func (*CellCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{0} +} +func (m *CellCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CellCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CellCapacity.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CellCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellCapacity.Merge(m, src) +} +func (m *CellCapacity) XXX_Size() int { + return m.Size() +} +func (m *CellCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_CellCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_CellCapacity proto.InternalMessageInfo + +func (m *CellCapacity) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *CellCapacity) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *CellCapacity) GetContainers() int32 { + if m != nil { + return m.Containers + } + return 0 +} + +type CellPresence struct { + CellId string `protobuf:"bytes,1,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + RepAddress string `protobuf:"bytes,2,opt,name=rep_address,json=repAddress,proto3" json:"rep_address"` + Zone string `protobuf:"bytes,3,opt,name=zone,proto3" json:"zone"` + Capacity *CellCapacity `protobuf:"bytes,4,opt,name=capacity,proto3" json:"capacity,omitempty"` + RootfsProviders []*Provider `protobuf:"bytes,5,rep,name=rootfs_providers,json=rootfsProviders,proto3" json:"rootfs_provider_list,omitempty"` + PlacementTags []string `protobuf:"bytes,6,rep,name=placement_tags,json=placementTags,proto3" json:"placement_tags,omitempty"` + OptionalPlacementTags []string `protobuf:"bytes,7,rep,name=optional_placement_tags,json=optionalPlacementTags,proto3" json:"optional_placement_tags,omitempty"` + RepUrl string `protobuf:"bytes,8,opt,name=rep_url,json=repUrl,proto3" json:"rep_url"` +} + +func (m *CellPresence) Reset() { *m = CellPresence{} } +func (*CellPresence) ProtoMessage() {} +func (*CellPresence) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{1} +} +func (m *CellPresence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CellPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CellPresence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CellPresence) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellPresence.Merge(m, src) +} +func (m *CellPresence) XXX_Size() int { + return m.Size() +} +func (m *CellPresence) XXX_DiscardUnknown() { + xxx_messageInfo_CellPresence.DiscardUnknown(m) +} + +var xxx_messageInfo_CellPresence proto.InternalMessageInfo + +func (m *CellPresence) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m 
*CellPresence) GetRepAddress() string { + if m != nil { + return m.RepAddress + } + return "" +} + +func (m *CellPresence) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CellPresence) GetCapacity() *CellCapacity { + if m != nil { + return m.Capacity + } + return nil +} + +func (m *CellPresence) GetRootfsProviders() []*Provider { + if m != nil { + return m.RootfsProviders + } + return nil +} + +func (m *CellPresence) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +func (m *CellPresence) GetOptionalPlacementTags() []string { + if m != nil { + return m.OptionalPlacementTags + } + return nil +} + +func (m *CellPresence) GetRepUrl() string { + if m != nil { + return m.RepUrl + } + return "" +} + +type Provider struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + Properties []string `protobuf:"bytes,2,rep,name=properties,proto3" json:"properties,omitempty"` +} + +func (m *Provider) Reset() { *m = Provider{} } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{2} +} +func (m *Provider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Provider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) +} +func (m *Provider) XXX_Size() int { + return m.Size() +} +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) +} + +var xxx_messageInfo_Provider proto.InternalMessageInfo + +func (m *Provider) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Provider) GetProperties() []string { + if m != nil { + return m.Properties + } + return nil +} + +type CellsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Cells []*CellPresence `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` +} + +func (m *CellsResponse) Reset() { *m = CellsResponse{} } +func (*CellsResponse) ProtoMessage() {} +func (*CellsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_842e821272d22ff7, []int{3} +} +func (m *CellsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CellsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CellsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CellsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellsResponse.Merge(m, src) +} +func (m *CellsResponse) XXX_Size() int { + return m.Size() +} +func (m *CellsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CellsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CellsResponse proto.InternalMessageInfo + +func (m *CellsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *CellsResponse) GetCells() []*CellPresence { + if m != nil { + return m.Cells + } + return nil +} + +func init() { + proto.RegisterType((*CellCapacity)(nil), "models.CellCapacity") + proto.RegisterType((*CellPresence)(nil), "models.CellPresence") + 
proto.RegisterType((*Provider)(nil), "models.Provider") + proto.RegisterType((*CellsResponse)(nil), "models.CellsResponse") +} + +func init() { proto.RegisterFile("cells.proto", fileDescriptor_842e821272d22ff7) } + +var fileDescriptor_842e821272d22ff7 = []byte{ + // 548 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, + 0x14, 0x4f, 0xe8, 0x9a, 0xb5, 0x0e, 0xdd, 0x26, 0x0b, 0x44, 0x35, 0x21, 0xa7, 0x2a, 0x43, 0xaa, + 0x26, 0xe8, 0xa6, 0x81, 0xb8, 0xd3, 0x09, 0x09, 0x0e, 0x93, 0x26, 0x0b, 0xce, 0x21, 0x7f, 0xde, + 0x4a, 0x44, 0x12, 0x5b, 0xb6, 0x8b, 0x54, 0x4e, 0x7c, 0x84, 0x7d, 0x00, 0x3e, 0x00, 0x1f, 0x85, + 0x63, 0x8f, 0x3b, 0x45, 0x34, 0xbd, 0xa0, 0x9c, 0xf6, 0x11, 0x90, 0x9d, 0x66, 0x2b, 0xbd, 0x58, + 0xbf, 0xf7, 0x7b, 0xbf, 0x67, 0x3f, 0xff, 0x9e, 0x8d, 0xdc, 0x08, 0xd2, 0x54, 0x8e, 0xb9, 0x60, + 0x8a, 0x61, 0x27, 0x63, 0x31, 0xa4, 0xf2, 0xf0, 0xe5, 0x34, 0x51, 0x5f, 0x66, 0xe1, 0x38, 0x62, + 0xd9, 0xc9, 0x94, 0x4d, 0xd9, 0x89, 0x49, 0x87, 0xb3, 0x2b, 0x13, 0x99, 0xc0, 0xa0, 0xba, 0xec, + 0xd0, 0x05, 0x21, 0x98, 0xa8, 0x83, 0xe1, 0xb5, 0x8d, 0x1e, 0x9e, 0x43, 0x9a, 0x9e, 0x07, 0x3c, + 0x88, 0x12, 0x35, 0xc7, 0xc7, 0xa8, 0x9b, 0x41, 0xc6, 0xc4, 0xdc, 0xcf, 0xc2, 0xbe, 0x3d, 0xb0, + 0x47, 0xed, 0x49, 0xaf, 0x2a, 0xbc, 0x7b, 0x92, 0x76, 0x6a, 0x78, 0x11, 0xe2, 0x23, 0xb4, 0x1b, + 0x27, 0xf2, 0xab, 0x56, 0x3e, 0x30, 0x4a, 0xb7, 0x2a, 0xbc, 0x86, 0xa2, 0x8e, 0x06, 0x17, 0x21, + 0x1e, 0x23, 0x14, 0xb1, 0x5c, 0x05, 0x49, 0x0e, 0x42, 0xf6, 0x5b, 0x46, 0xb8, 0x57, 0x15, 0xde, + 0x06, 0x4b, 0x37, 0xf0, 0xf0, 0x67, 0xab, 0x6e, 0xe9, 0x52, 0x80, 0x84, 0x3c, 0x02, 0x7d, 0x8c, + 0xbe, 0xb6, 0x9f, 0xc4, 0xa6, 0xa1, 0x6e, 0x7d, 0xcc, 0x9a, 0xa2, 0x8e, 0x06, 0x1f, 0x62, 0x7c, + 0x8a, 0x5c, 0x01, 0xdc, 0x0f, 0xe2, 0x58, 0x80, 0x94, 0xa6, 0xa1, 0xee, 0x64, 0xbf, 0x2a, 0xbc, + 0x4d, 0x9a, 0x22, 0x01, 0xfc, 0x6d, 0x8d, 0xf1, 0x53, 0xb4, 0xf3, 0x9d, 0xe5, 0x60, 0x5a, 0xea, + 0x4e, 0x3a, 0x55, 0xe1, 0x99, 0x98, 0x9a, 0x15, 0x9f, 0xa2, 0x4e, 0xb4, 0x36, 0xa5, 0xbf, 0x33, + 0xb0, 0x47, 0xee, 0xd9, 0xa3, 0x71, 0x6d, 0xf8, 0x78, 0xd3, 0x30, 0x7a, 0xa7, 0xc2, 0x3e, 0x3a, + 0x10, 0x8c, 0xa9, 0x2b, 0xe9, 0x73, 0xc1, 0xbe, 0x25, 0xb1, 0xbe, 0x6e, 0x7b, 0xd0, 0x1a, 0xb9, + 0x67, 0x07, 0x4d, 0xe5, 0xe5, 0x3a, 0x31, 0x19, 0x56, 0x85, 0x47, 0xb6, 0xd4, 0x7e, 0x9a, 0x48, + 0xf5, 0x82, 0x65, 0x89, 0x82, 0x8c, 0xab, 0x39, 0xdd, 0xaf, 0xf3, 0x4d, 0x8d, 0xc4, 0xcf, 0xd1, + 0x1e, 0x4f, 0x83, 0x08, 0x32, 0xc8, 0x95, 0xaf, 0x82, 0xa9, 0xec, 0x3b, 0x83, 0xd6, 0xa8, 0x4b, + 0x7b, 0x77, 0xec, 0xc7, 0x60, 0x2a, 0xf1, 0x1b, 0xf4, 0x84, 0x71, 0x95, 0xb0, 0x3c, 0x48, 0xfd, + 0x2d, 0xfd, 0xae, 0xd1, 0x3f, 0x6e, 0xd2, 0x97, 0xff, 0xd5, 0x1d, 0xa1, 0x5d, 0x6d, 0xd5, 0x4c, + 0xa4, 0xfd, 0xce, 0xbd, 0xcf, 0x6b, 0x8a, 0x3a, 0x02, 0xf8, 0x27, 0x91, 0x0e, 0xdf, 0xa3, 0x4e, + 0xd3, 0x91, 0x76, 0x30, 0x0f, 0x32, 0x58, 0x8f, 0xc5, 0x38, 0xa8, 0x63, 0x6a, 0x56, 0x4c, 0x10, + 0xe2, 0x82, 0x71, 0x10, 0x2a, 0x01, 0x3d, 0x10, 0x7d, 0xf4, 0x06, 0x33, 0xfc, 0x8c, 0x7a, 0xda, + 0x49, 0x49, 0x41, 0x72, 0x96, 0x4b, 0xc0, 0xcf, 0x50, 0xdb, 0xbc, 0x4d, 0xb3, 0x9f, 0x7b, 0xd6, + 0x6b, 0x5c, 0x7b, 0xa7, 0x49, 0x5a, 0xe7, 0xf0, 0x31, 0x6a, 0x9b, 0x4f, 0x60, 0x36, 0xdc, 0x1a, + 0x4a, 0xf3, 0x64, 0x68, 0x2d, 0x99, 0xbc, 0x5e, 0x2c, 0x89, 0x75, 0xb3, 0x24, 0xd6, 0xed, 0x92, + 0xd8, 0x3f, 0x4a, 0x62, 0xff, 0x2a, 0x89, 0xfd, 0xbb, 0x24, 0xf6, 0xa2, 0x24, 0xf6, 0x9f, 0x92, + 0xd8, 0x7f, 0x4b, 0x62, 0xdd, 0x96, 0xc4, 0xbe, 0x5e, 0x11, 0x6b, 0xb1, 0x22, 0xd6, 0xcd, 0x8a, + 0x58, 
0xa1, 0x63, 0xbe, 0xc6, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x12, 0x8c, 0x77, 0x8d, + 0x6d, 0x03, 0x00, 0x00, +} + +func (this *CellCapacity) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CellCapacity) + if !ok { + that2, ok := that.(CellCapacity) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.Containers != that1.Containers { + return false + } + return true +} +func (this *CellPresence) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CellPresence) + if !ok { + that2, ok := that.(CellPresence) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.RepAddress != that1.RepAddress { + return false + } + if this.Zone != that1.Zone { + return false + } + if !this.Capacity.Equal(that1.Capacity) { + return false + } + if len(this.RootfsProviders) != len(that1.RootfsProviders) { + return false + } + for i := range this.RootfsProviders { + if !this.RootfsProviders[i].Equal(that1.RootfsProviders[i]) { + return false + } + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + if len(this.OptionalPlacementTags) != len(that1.OptionalPlacementTags) { + return false + } + for i := range this.OptionalPlacementTags { + if this.OptionalPlacementTags[i] != that1.OptionalPlacementTags[i] { + return false + } + } + if this.RepUrl != that1.RepUrl { + return false + } + return true +} +func (this *Provider) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Provider) + if !ok { + that2, ok := that.(Provider) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Properties) != len(that1.Properties) { + return false + } + for i := range this.Properties { + if this.Properties[i] != that1.Properties[i] { + return false + } + } + return true +} +func (this *CellsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CellsResponse) + if !ok { + that2, ok := that.(CellsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.Cells) != len(that1.Cells) { + return false + } + for i := range this.Cells { + if !this.Cells[i].Equal(that1.Cells[i]) { + return false + } + } + return true +} +func (this *CellCapacity) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.CellCapacity{") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "Containers: "+fmt.Sprintf("%#v", this.Containers)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CellPresence) GoString() string { + if this == nil { + return "nil" + } + 
s := make([]string, 0, 12) + s = append(s, "&models.CellPresence{") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "RepAddress: "+fmt.Sprintf("%#v", this.RepAddress)+",\n") + s = append(s, "Zone: "+fmt.Sprintf("%#v", this.Zone)+",\n") + if this.Capacity != nil { + s = append(s, "Capacity: "+fmt.Sprintf("%#v", this.Capacity)+",\n") + } + if this.RootfsProviders != nil { + s = append(s, "RootfsProviders: "+fmt.Sprintf("%#v", this.RootfsProviders)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "OptionalPlacementTags: "+fmt.Sprintf("%#v", this.OptionalPlacementTags)+",\n") + s = append(s, "RepUrl: "+fmt.Sprintf("%#v", this.RepUrl)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Provider) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.Provider{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Properties: "+fmt.Sprintf("%#v", this.Properties)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CellsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.CellsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.Cells != nil { + s = append(s, "Cells: "+fmt.Sprintf("%#v", this.Cells)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCells(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CellCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Containers != 0 { + i = encodeVarintCells(dAtA, i, uint64(m.Containers)) + i-- + dAtA[i] = 0x18 + } + if m.DiskMb != 0 { + i = encodeVarintCells(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x10 + } + if m.MemoryMb != 0 { + i = encodeVarintCells(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CellPresence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellPresence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RepUrl) > 0 { + i -= len(m.RepUrl) + copy(dAtA[i:], m.RepUrl) + i = encodeVarintCells(dAtA, i, uint64(len(m.RepUrl))) + i-- + dAtA[i] = 0x42 + } + if len(m.OptionalPlacementTags) > 0 { + for iNdEx := len(m.OptionalPlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OptionalPlacementTags[iNdEx]) + copy(dAtA[i:], m.OptionalPlacementTags[iNdEx]) + i = encodeVarintCells(dAtA, i, uint64(len(m.OptionalPlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.PlacementTags) > 0 { + 
for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintCells(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RootfsProviders) > 0 { + for iNdEx := len(m.RootfsProviders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RootfsProviders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Capacity != nil { + { + size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Zone) > 0 { + i -= len(m.Zone) + copy(dAtA[i:], m.Zone) + i = encodeVarintCells(dAtA, i, uint64(len(m.Zone))) + i-- + dAtA[i] = 0x1a + } + if len(m.RepAddress) > 0 { + i -= len(m.RepAddress) + copy(dAtA[i:], m.RepAddress) + i = encodeVarintCells(dAtA, i, uint64(len(m.RepAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintCells(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Provider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Provider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Properties) > 0 { + for iNdEx := len(m.Properties) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Properties[iNdEx]) + copy(dAtA[i:], m.Properties[iNdEx]) + i = encodeVarintCells(dAtA, i, uint64(len(m.Properties[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCells(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CellsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CellsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CellsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Cells[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCells(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCells(dAtA []byte, offset int, v uint64) int { + offset -= sovCells(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CellCapacity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryMb != 0 { + n += 1 + sovCells(uint64(m.MemoryMb)) + } + if 
m.DiskMb != 0 { + n += 1 + sovCells(uint64(m.DiskMb)) + } + if m.Containers != 0 { + n += 1 + sovCells(uint64(m.Containers)) + } + return n +} + +func (m *CellPresence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + l = len(m.RepAddress) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + l = len(m.Zone) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + if m.Capacity != nil { + l = m.Capacity.Size() + n += 1 + l + sovCells(uint64(l)) + } + if len(m.RootfsProviders) > 0 { + for _, e := range m.RootfsProviders { + l = e.Size() + n += 1 + l + sovCells(uint64(l)) + } + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 1 + l + sovCells(uint64(l)) + } + } + if len(m.OptionalPlacementTags) > 0 { + for _, s := range m.OptionalPlacementTags { + l = len(s) + n += 1 + l + sovCells(uint64(l)) + } + } + l = len(m.RepUrl) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + return n +} + +func (m *Provider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCells(uint64(l)) + } + if len(m.Properties) > 0 { + for _, s := range m.Properties { + l = len(s) + n += 1 + l + sovCells(uint64(l)) + } + } + return n +} + +func (m *CellsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovCells(uint64(l)) + } + if len(m.Cells) > 0 { + for _, e := range m.Cells { + l = e.Size() + n += 1 + l + sovCells(uint64(l)) + } + } + return n +} + +func sovCells(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCells(x uint64) (n int) { + return sovCells(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CellCapacity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CellCapacity{`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `Containers:` + fmt.Sprintf("%v", this.Containers) + `,`, + `}`, + }, "") + return s +} +func (this *CellPresence) String() string { + if this == nil { + return "nil" + } + repeatedStringForRootfsProviders := "[]*Provider{" + for _, f := range this.RootfsProviders { + repeatedStringForRootfsProviders += strings.Replace(f.String(), "Provider", "Provider", 1) + "," + } + repeatedStringForRootfsProviders += "}" + s := strings.Join([]string{`&CellPresence{`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `RepAddress:` + fmt.Sprintf("%v", this.RepAddress) + `,`, + `Zone:` + fmt.Sprintf("%v", this.Zone) + `,`, + `Capacity:` + strings.Replace(this.Capacity.String(), "CellCapacity", "CellCapacity", 1) + `,`, + `RootfsProviders:` + repeatedStringForRootfsProviders + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `OptionalPlacementTags:` + fmt.Sprintf("%v", this.OptionalPlacementTags) + `,`, + `RepUrl:` + fmt.Sprintf("%v", this.RepUrl) + `,`, + `}`, + }, "") + return s +} +func (this *Provider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Provider{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Properties:` + fmt.Sprintf("%v", this.Properties) + `,`, + `}`, + }, "") + return s +} +func (this *CellsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForCells := "[]*CellPresence{" + for _, f := range this.Cells { + repeatedStringForCells += strings.Replace(f.String(), "CellPresence", 
"CellPresence", 1) + "," + } + repeatedStringForCells += "}" + s := strings.Join([]string{`&CellsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Cells:` + repeatedStringForCells + `,`, + `}`, + }, "") + return s +} +func valueToStringCells(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CellCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellCapacity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + m.Containers = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Containers |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CellPresence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellPresence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellPresence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = &CellCapacity{} + } + if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootfsProviders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootfsProviders = append(m.RootfsProviders, &Provider{}) + if err := m.RootfsProviders[len(m.RootfsProviders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalPlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OptionalPlacementTags = append(m.OptionalPlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Properties = append(m.Properties, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CellsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CellsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CellsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCells + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCells + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCells + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, &CellPresence{}) + if err := m.Cells[len(m.Cells)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCells(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCells + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCells(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for 
iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCells + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCells + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCells + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCells + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCells + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCells + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCells = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCells = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCells = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/cells.proto b/vendor/code.cloudfoundry.org/bbs/models/cells.proto new file mode 100644 index 00000000..e73a028a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/cells.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "error.proto"; + +message CellCapacity { + int32 memory_mb = 1 [(gogoproto.jsontag) = "memory_mb"]; + int32 disk_mb = 2 [(gogoproto.jsontag) = "disk_mb"]; + int32 containers = 3 [(gogoproto.jsontag) = "containers"]; +} + +message CellPresence { + string cell_id = 1 [(gogoproto.jsontag) = "cell_id"]; + string rep_address = 2 [(gogoproto.jsontag) = "rep_address"]; + string zone = 3 [(gogoproto.jsontag) = "zone"]; + CellCapacity capacity = 4; + repeated Provider rootfs_providers = 5 [(gogoproto.jsontag) = "rootfs_provider_list,omitempty"]; + repeated string placement_tags = 6; + repeated string optional_placement_tags = 7; + string rep_url = 8 [(gogoproto.jsontag) = "rep_url"]; +} + +message Provider { + string name = 1 [(gogoproto.jsontag) = "name"]; + repeated string properties = 2; +} + +message CellsResponse { + Error error = 1; + repeated CellPresence cells = 2; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go new file mode 100644 index 00000000..7815f28f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.pb.go @@ -0,0 +1,385 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: certificate_properties.proto + +package models + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CertificateProperties struct { + OrganizationalUnit []string `protobuf:"bytes,1,rep,name=organizational_unit,json=organizationalUnit,proto3" json:"organizational_unit,omitempty"` +} + +func (m *CertificateProperties) Reset() { *m = CertificateProperties{} } +func (*CertificateProperties) ProtoMessage() {} +func (*CertificateProperties) Descriptor() ([]byte, []int) { + return fileDescriptor_9291b57c1fe01997, []int{0} +} +func (m *CertificateProperties) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateProperties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CertificateProperties.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CertificateProperties) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateProperties.Merge(m, src) +} +func (m *CertificateProperties) XXX_Size() int { + return m.Size() +} +func (m *CertificateProperties) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateProperties.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateProperties proto.InternalMessageInfo + +func (m *CertificateProperties) GetOrganizationalUnit() []string { + if m != nil { + return m.OrganizationalUnit + } + return nil +} + +func init() { + proto.RegisterType((*CertificateProperties)(nil), "models.CertificateProperties") +} + +func init() { proto.RegisterFile("certificate_properties.proto", fileDescriptor_9291b57c1fe01997) } + +var fileDescriptor_9291b57c1fe01997 = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x4e, 0x2d, 0x2a, + 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0x49, 0x8d, 0x2f, 0x28, 0xca, 0x2f, 0x00, 0x71, 0x53, 0x8b, + 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xd8, 0x72, 0xf3, 0x53, 0x52, 0x73, 0x8a, 0x95, 0x3c, + 0xb8, 0x44, 0x9d, 0x11, 0xea, 0x02, 0xe0, 0xca, 0x84, 0xf4, 0xb9, 0x84, 0xf3, 0x8b, 0xd2, 0x13, + 0xf3, 0x32, 0xab, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x12, 0x73, 0xe2, 0x4b, 0xf3, 0x32, 0x4b, 0x24, + 0x18, 0x15, 0x98, 0x35, 0x38, 0x83, 0x84, 0x50, 0xa5, 0x42, 0xf3, 0x32, 0x4b, 0x9c, 0x4c, 0x2e, + 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, + 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, + 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, + 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0x9d, 0x63, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x07, 0xe2, 0x02, 0xdf, 0xae, 0x00, 0x00, 0x00, +} + +func (this *CertificateProperties) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CertificateProperties) + if !ok { + that2, ok := that.(CertificateProperties) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.OrganizationalUnit) != len(that1.OrganizationalUnit) { + return false + } + for i 
:= range this.OrganizationalUnit { + if this.OrganizationalUnit[i] != that1.OrganizationalUnit[i] { + return false + } + } + return true +} +func (this *CertificateProperties) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.CertificateProperties{") + s = append(s, "OrganizationalUnit: "+fmt.Sprintf("%#v", this.OrganizationalUnit)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCertificateProperties(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CertificateProperties) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateProperties) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateProperties) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OrganizationalUnit) > 0 { + for iNdEx := len(m.OrganizationalUnit) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OrganizationalUnit[iNdEx]) + copy(dAtA[i:], m.OrganizationalUnit[iNdEx]) + i = encodeVarintCertificateProperties(dAtA, i, uint64(len(m.OrganizationalUnit[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintCertificateProperties(dAtA []byte, offset int, v uint64) int { + offset -= sovCertificateProperties(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CertificateProperties) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.OrganizationalUnit) > 0 { + for _, s := range m.OrganizationalUnit { + l = len(s) + n += 1 + l + sovCertificateProperties(uint64(l)) + } + } + return n +} + +func sovCertificateProperties(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCertificateProperties(x uint64) (n int) { + return sovCertificateProperties(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CertificateProperties) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateProperties{`, + `OrganizationalUnit:` + fmt.Sprintf("%v", this.OrganizationalUnit) + `,`, + `}`, + }, "") + return s +} +func valueToStringCertificateProperties(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CertificateProperties) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateProperties: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateProperties: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field OrganizationalUnit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCertificateProperties + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCertificateProperties + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrganizationalUnit = append(m.OrganizationalUnit, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCertificateProperties(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCertificateProperties + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCertificateProperties(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCertificateProperties + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCertificateProperties + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCertificateProperties + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCertificateProperties + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCertificateProperties = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCertificateProperties = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCertificateProperties = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto new file mode 100644 index 00000000..9eced102 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/certificate_properties.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package models; + +message CertificateProperties { + repeated string organizational_unit = 1; +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/check_definition.go b/vendor/code.cloudfoundry.org/bbs/models/check_definition.go new file mode 100644 index 00000000..9772c5db --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/check_definition.go @@ -0,0 
+1,55 @@ +package models + +type PortChecker interface { + GetPort() uint32 +} + +func (check CheckDefinition) Validate() error { + var validationError ValidationError + + checks := check.GetChecks() + + for _, check := range checks { + checkError := check.Validate() + if checkError != nil { + validationError = validationError.Append(checkError) + } + } + + readiness_checks := check.GetReadinessChecks() + + for _, check := range readiness_checks { + checkError := check.Validate() + if checkError != nil { + validationError = validationError.Append(checkError) + } + } + + return validationError.ToError() + +} + +func (check Check) GetPortChecker() PortChecker { + httpCheck := check.GetHttpCheck() + tcpCheck := check.GetTcpCheck() + if httpCheck != nil && tcpCheck != nil { + return nil + } + if httpCheck != nil { + return httpCheck + } else { + return tcpCheck + } +} + +func (check Check) Validate() error { + var validationError ValidationError + c := check.GetPortChecker() + + if c == nil { + validationError = validationError.Append(ErrInvalidField{"check"}) + } else if !(c.GetPort() > 0 && c.GetPort() <= 65535) { + validationError = validationError.Append(ErrInvalidField{"port"}) + } + return validationError.ToError() +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go b/vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go new file mode 100644 index 00000000..d4c6ff75 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/check_definition.pb.go @@ -0,0 +1,1453 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: check_definition.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CheckDefinition struct { + Checks []*Check `protobuf:"bytes,1,rep,name=checks,proto3" json:"checks,omitempty"` + LogSource string `protobuf:"bytes,2,opt,name=log_source,json=logSource,proto3" json:"log_source"` + ReadinessChecks []*Check `protobuf:"bytes,3,rep,name=readiness_checks,json=readinessChecks,proto3" json:"readiness_checks,omitempty"` +} + +func (m *CheckDefinition) Reset() { *m = CheckDefinition{} } +func (*CheckDefinition) ProtoMessage() {} +func (*CheckDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{0} +} +func (m *CheckDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckDefinition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckDefinition.Merge(m, src) +} +func (m *CheckDefinition) XXX_Size() int { + return m.Size() +} +func (m *CheckDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CheckDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckDefinition proto.InternalMessageInfo + +func (m *CheckDefinition) GetChecks() []*Check { + if m != nil { + return m.Checks + } + return nil +} + +func (m *CheckDefinition) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *CheckDefinition) GetReadinessChecks() []*Check { + if m != nil { + return m.ReadinessChecks + } + return nil +} + +type Check struct { + // oneof is hard to use right now, instead we can do this check in validation + // oneof check { + TcpCheck *TCPCheck `protobuf:"bytes,1,opt,name=tcp_check,json=tcpCheck,proto3" json:"tcp_check,omitempty"` + HttpCheck *HTTPCheck `protobuf:"bytes,2,opt,name=http_check,json=httpCheck,proto3" json:"http_check,omitempty"` +} + +func (m *Check) Reset() { *m = Check{} } +func (*Check) ProtoMessage() {} +func (*Check) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{1} +} +func (m *Check) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Check) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Check.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Check) XXX_Merge(src proto.Message) { + xxx_messageInfo_Check.Merge(m, src) +} +func (m *Check) XXX_Size() int { + return m.Size() +} +func (m *Check) XXX_DiscardUnknown() { + xxx_messageInfo_Check.DiscardUnknown(m) +} + +var xxx_messageInfo_Check proto.InternalMessageInfo + +func (m *Check) GetTcpCheck() *TCPCheck { + if m != nil { + return m.TcpCheck + } + return nil +} + +func (m *Check) GetHttpCheck() *HTTPCheck { + if m != nil { + return m.HttpCheck + } + return nil +} + +type TCPCheck struct { + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port"` + ConnectTimeoutMs uint64 `protobuf:"varint,2,opt,name=connect_timeout_ms,json=connectTimeoutMs,proto3" json:"connect_timeout_ms,omitempty"` + IntervalMs uint64 `protobuf:"varint,3,opt,name=interval_ms,json=intervalMs,proto3" json:"interval_ms,omitempty"` +} + +func (m *TCPCheck) Reset() { *m = TCPCheck{} } +func (*TCPCheck) ProtoMessage() 
{} +func (*TCPCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{2} +} +func (m *TCPCheck) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TCPCheck.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TCPCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPCheck.Merge(m, src) +} +func (m *TCPCheck) XXX_Size() int { + return m.Size() +} +func (m *TCPCheck) XXX_DiscardUnknown() { + xxx_messageInfo_TCPCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_TCPCheck proto.InternalMessageInfo + +func (m *TCPCheck) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *TCPCheck) GetConnectTimeoutMs() uint64 { + if m != nil { + return m.ConnectTimeoutMs + } + return 0 +} + +func (m *TCPCheck) GetIntervalMs() uint64 { + if m != nil { + return m.IntervalMs + } + return 0 +} + +type HTTPCheck struct { + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port"` + RequestTimeoutMs uint64 `protobuf:"varint,2,opt,name=request_timeout_ms,json=requestTimeoutMs,proto3" json:"request_timeout_ms,omitempty"` + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path"` + IntervalMs uint64 `protobuf:"varint,4,opt,name=interval_ms,json=intervalMs,proto3" json:"interval_ms,omitempty"` +} + +func (m *HTTPCheck) Reset() { *m = HTTPCheck{} } +func (*HTTPCheck) ProtoMessage() {} +func (*HTTPCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_048a62b88ce7913d, []int{3} +} +func (m *HTTPCheck) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPCheck.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPCheck.Merge(m, src) +} +func (m *HTTPCheck) XXX_Size() int { + return m.Size() +} +func (m *HTTPCheck) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPCheck proto.InternalMessageInfo + +func (m *HTTPCheck) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *HTTPCheck) GetRequestTimeoutMs() uint64 { + if m != nil { + return m.RequestTimeoutMs + } + return 0 +} + +func (m *HTTPCheck) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *HTTPCheck) GetIntervalMs() uint64 { + if m != nil { + return m.IntervalMs + } + return 0 +} + +func init() { + proto.RegisterType((*CheckDefinition)(nil), "models.CheckDefinition") + proto.RegisterType((*Check)(nil), "models.Check") + proto.RegisterType((*TCPCheck)(nil), "models.TCPCheck") + proto.RegisterType((*HTTPCheck)(nil), "models.HTTPCheck") +} + +func init() { proto.RegisterFile("check_definition.proto", fileDescriptor_048a62b88ce7913d) } + +var fileDescriptor_048a62b88ce7913d = []byte{ + // 414 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x31, 0x8f, 0xd3, 0x40, + 0x10, 0x85, 0xbd, 0x97, 0x10, 0xc5, 0x13, 0x1d, 0x17, 0xb6, 0x40, 0x11, 0x42, 0x9b, 0xc8, 0x12, + 0x52, 0x0a, 0xe2, 0x43, 0x07, 0x05, 0x75, 0x8e, 0x82, 0xe6, 0x24, 0x64, 0xdc, 0x5b, 0xce, 0x66, + 
0xcf, 0xb6, 0xb0, 0xbd, 0xc6, 0xbb, 0x86, 0x96, 0x9f, 0x40, 0x45, 0x4f, 0xc7, 0x4f, 0xa1, 0x4c, + 0x99, 0x2a, 0x22, 0x4e, 0x83, 0x52, 0xe5, 0x27, 0x20, 0x8f, 0xed, 0x80, 0xa2, 0x48, 0x34, 0xd6, + 0xcc, 0xbe, 0xef, 0xbd, 0x37, 0x85, 0xe1, 0x31, 0x0f, 0x05, 0xff, 0xe0, 0x2d, 0xc5, 0x7d, 0x94, + 0x46, 0x3a, 0x92, 0xa9, 0x9d, 0xe5, 0x52, 0x4b, 0xda, 0x4b, 0xe4, 0x52, 0xc4, 0xea, 0xc9, 0x2c, + 0x88, 0x74, 0x58, 0x2c, 0x6c, 0x2e, 0x93, 0xeb, 0x40, 0x06, 0xf2, 0x1a, 0xe5, 0x45, 0x71, 0x8f, + 0x1b, 0x2e, 0x38, 0xd5, 0x36, 0xeb, 0x3b, 0x81, 0xab, 0xdb, 0x2a, 0xf1, 0xcd, 0x31, 0x90, 0x3e, + 0x83, 0x1e, 0x96, 0xa8, 0x11, 0x99, 0x74, 0xa6, 0x83, 0x9b, 0x4b, 0xbb, 0xce, 0xb6, 0x11, 0x74, + 0x1a, 0x91, 0xce, 0x00, 0x62, 0x19, 0x78, 0x4a, 0x16, 0x39, 0x17, 0xa3, 0x8b, 0x09, 0x99, 0x9a, + 0xf3, 0x87, 0xfb, 0xcd, 0xf8, 0x9f, 0x57, 0xc7, 0x8c, 0x65, 0xf0, 0x1e, 0x47, 0xfa, 0x1a, 0x86, + 0xb9, 0xf0, 0x97, 0x51, 0x2a, 0x94, 0xf2, 0x9a, 0xfc, 0xce, 0xb9, 0xfc, 0xab, 0x23, 0x86, 0xbb, + 0xb2, 0x42, 0x78, 0x80, 0x13, 0x9d, 0x81, 0xa9, 0x79, 0x56, 0x9b, 0x47, 0x64, 0x42, 0xa6, 0x83, + 0x9b, 0x61, 0xeb, 0x75, 0x6f, 0xdf, 0xd5, 0xf6, 0xbe, 0xe6, 0x59, 0x8d, 0xbf, 0x00, 0x08, 0xb5, + 0x6e, 0xf9, 0x0b, 0xe4, 0x1f, 0xb5, 0xfc, 0x5b, 0xd7, 0x6d, 0x0c, 0x66, 0x05, 0xe1, 0x68, 0x7d, + 0x86, 0x7e, 0x9b, 0x43, 0x9f, 0x42, 0x37, 0x93, 0xb9, 0xc6, 0x9e, 0xcb, 0x79, 0x7f, 0xbf, 0x19, + 0xe3, 0xee, 0xe0, 0x97, 0x3e, 0x07, 0xca, 0x65, 0x9a, 0x0a, 0xae, 0x3d, 0x1d, 0x25, 0x42, 0x16, + 0xda, 0x4b, 0x14, 0x76, 0x74, 0x9d, 0x61, 0xa3, 0xb8, 0xb5, 0x70, 0xa7, 0xe8, 0x18, 0x06, 0x51, + 0xaa, 0x45, 0xfe, 0xc9, 0x8f, 0x2b, 0xac, 0x83, 0x18, 0xb4, 0x4f, 0x77, 0xca, 0xfa, 0x46, 0xc0, + 0x3c, 0x5e, 0xf4, 0xff, 0xea, 0x5c, 0x7c, 0x2c, 0x84, 0x3a, 0x57, 0xdd, 0x28, 0x7f, 0xab, 0xab, + 0x2c, 0x5f, 0x87, 0xd8, 0x69, 0x36, 0x59, 0xbe, 0x0e, 0x1d, 0xfc, 0x9e, 0x1e, 0xd6, 0x3d, 0x3d, + 0x6c, 0xfe, 0x6a, 0xb5, 0x65, 0xc6, 0x7a, 0xcb, 0x8c, 0xc3, 0x96, 0x91, 0x2f, 0x25, 0x23, 0x3f, + 0x4a, 0x46, 0x7e, 0x96, 0x8c, 0xac, 0x4a, 0x46, 0x7e, 0x95, 0x8c, 0xfc, 0x2e, 0x99, 0x71, 0x28, + 0x19, 0xf9, 0xba, 0x63, 0xc6, 0x6a, 0xc7, 0x8c, 0xf5, 0x8e, 0x19, 0x8b, 0x1e, 0xfe, 0x5c, 0x2f, + 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0x3b, 0x86, 0xad, 0x02, 0x00, 0x00, +} + +func (this *CheckDefinition) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CheckDefinition) + if !ok { + that2, ok := that.(CheckDefinition) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Checks) != len(that1.Checks) { + return false + } + for i := range this.Checks { + if !this.Checks[i].Equal(that1.Checks[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + if len(this.ReadinessChecks) != len(that1.ReadinessChecks) { + return false + } + for i := range this.ReadinessChecks { + if !this.ReadinessChecks[i].Equal(that1.ReadinessChecks[i]) { + return false + } + } + return true +} +func (this *Check) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Check) + if !ok { + that2, ok := that.(Check) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TcpCheck.Equal(that1.TcpCheck) { + return false + } + if !this.HttpCheck.Equal(that1.HttpCheck) { + return false + } + return true +} +func (this *TCPCheck) Equal(that interface{}) bool { + if 
that == nil { + return this == nil + } + + that1, ok := that.(*TCPCheck) + if !ok { + that2, ok := that.(TCPCheck) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Port != that1.Port { + return false + } + if this.ConnectTimeoutMs != that1.ConnectTimeoutMs { + return false + } + if this.IntervalMs != that1.IntervalMs { + return false + } + return true +} +func (this *HTTPCheck) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HTTPCheck) + if !ok { + that2, ok := that.(HTTPCheck) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Port != that1.Port { + return false + } + if this.RequestTimeoutMs != that1.RequestTimeoutMs { + return false + } + if this.Path != that1.Path { + return false + } + if this.IntervalMs != that1.IntervalMs { + return false + } + return true +} +func (this *CheckDefinition) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.CheckDefinition{") + if this.Checks != nil { + s = append(s, "Checks: "+fmt.Sprintf("%#v", this.Checks)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + if this.ReadinessChecks != nil { + s = append(s, "ReadinessChecks: "+fmt.Sprintf("%#v", this.ReadinessChecks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Check) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.Check{") + if this.TcpCheck != nil { + s = append(s, "TcpCheck: "+fmt.Sprintf("%#v", this.TcpCheck)+",\n") + } + if this.HttpCheck != nil { + s = append(s, "HttpCheck: "+fmt.Sprintf("%#v", this.HttpCheck)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TCPCheck) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.TCPCheck{") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "ConnectTimeoutMs: "+fmt.Sprintf("%#v", this.ConnectTimeoutMs)+",\n") + s = append(s, "IntervalMs: "+fmt.Sprintf("%#v", this.IntervalMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HTTPCheck) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.HTTPCheck{") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "RequestTimeoutMs: "+fmt.Sprintf("%#v", this.RequestTimeoutMs)+",\n") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "IntervalMs: "+fmt.Sprintf("%#v", this.IntervalMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCheckDefinition(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *CheckDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.ReadinessChecks) > 0 { + for iNdEx := len(m.ReadinessChecks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessChecks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintCheckDefinition(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x12 + } + if len(m.Checks) > 0 { + for iNdEx := len(m.Checks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Checks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Check) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Check) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Check) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HttpCheck != nil { + { + size, err := m.HttpCheck.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.TcpCheck != nil { + { + size, err := m.TcpCheck.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCheckDefinition(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TCPCheck) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPCheck) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPCheck) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IntervalMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.IntervalMs)) + i-- + dAtA[i] = 0x18 + } + if m.ConnectTimeoutMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.ConnectTimeoutMs)) + i-- + dAtA[i] = 0x10 + } + if m.Port != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPCheck) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPCheck) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPCheck) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IntervalMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.IntervalMs)) + i-- + dAtA[i] = 0x20 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintCheckDefinition(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.RequestTimeoutMs != 0 { + i = encodeVarintCheckDefinition(dAtA, i, uint64(m.RequestTimeoutMs)) + i-- + dAtA[i] = 0x10 + } + if m.Port != 0 { + i = 
encodeVarintCheckDefinition(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintCheckDefinition(dAtA []byte, offset int, v uint64) int { + offset -= sovCheckDefinition(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CheckDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Checks) > 0 { + for _, e := range m.Checks { + l = e.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovCheckDefinition(uint64(l)) + } + if len(m.ReadinessChecks) > 0 { + for _, e := range m.ReadinessChecks { + l = e.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + } + return n +} + +func (m *Check) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpCheck != nil { + l = m.TcpCheck.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + if m.HttpCheck != nil { + l = m.HttpCheck.Size() + n += 1 + l + sovCheckDefinition(uint64(l)) + } + return n +} + +func (m *TCPCheck) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != 0 { + n += 1 + sovCheckDefinition(uint64(m.Port)) + } + if m.ConnectTimeoutMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.ConnectTimeoutMs)) + } + if m.IntervalMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.IntervalMs)) + } + return n +} + +func (m *HTTPCheck) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != 0 { + n += 1 + sovCheckDefinition(uint64(m.Port)) + } + if m.RequestTimeoutMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.RequestTimeoutMs)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovCheckDefinition(uint64(l)) + } + if m.IntervalMs != 0 { + n += 1 + sovCheckDefinition(uint64(m.IntervalMs)) + } + return n +} + +func sovCheckDefinition(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCheckDefinition(x uint64) (n int) { + return sovCheckDefinition(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CheckDefinition) String() string { + if this == nil { + return "nil" + } + repeatedStringForChecks := "[]*Check{" + for _, f := range this.Checks { + repeatedStringForChecks += strings.Replace(f.String(), "Check", "Check", 1) + "," + } + repeatedStringForChecks += "}" + repeatedStringForReadinessChecks := "[]*Check{" + for _, f := range this.ReadinessChecks { + repeatedStringForReadinessChecks += strings.Replace(f.String(), "Check", "Check", 1) + "," + } + repeatedStringForReadinessChecks += "}" + s := strings.Join([]string{`&CheckDefinition{`, + `Checks:` + repeatedStringForChecks + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `ReadinessChecks:` + repeatedStringForReadinessChecks + `,`, + `}`, + }, "") + return s +} +func (this *Check) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Check{`, + `TcpCheck:` + strings.Replace(this.TcpCheck.String(), "TCPCheck", "TCPCheck", 1) + `,`, + `HttpCheck:` + strings.Replace(this.HttpCheck.String(), "HTTPCheck", "HTTPCheck", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TCPCheck) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPCheck{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `ConnectTimeoutMs:` + fmt.Sprintf("%v", this.ConnectTimeoutMs) + `,`, + `IntervalMs:` + fmt.Sprintf("%v", this.IntervalMs) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPCheck) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPCheck{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `RequestTimeoutMs:` + fmt.Sprintf("%v", this.RequestTimeoutMs) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `IntervalMs:` + fmt.Sprintf("%v", this.IntervalMs) + `,`, + `}`, + }, "") + return s +} +func valueToStringCheckDefinition(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CheckDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checks = append(m.Checks, &Check{}) + if err := m.Checks[len(m.Checks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessChecks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadinessChecks = append(m.ReadinessChecks, &Check{}) + if err := m.ReadinessChecks[len(m.ReadinessChecks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err 
:= skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Check) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Check: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Check: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TcpCheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TcpCheck == nil { + m.TcpCheck = &TCPCheck{} + } + if err := m.TcpCheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpCheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpCheck == nil { + m.HttpCheck = &HTTPCheck{} + } + if err := m.HttpCheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPCheck) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPCheck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPCheck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectTimeoutMs", wireType) + } + m.ConnectTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConnectTimeoutMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalMs", wireType) + } + m.IntervalMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntervalMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPCheck) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPCheck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPCheck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTimeoutMs", wireType) + } + m.RequestTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequestTimeoutMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCheckDefinition + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCheckDefinition + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalMs", wireType) + } + m.IntervalMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntervalMs |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCheckDefinition(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCheckDefinition + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCheckDefinition(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCheckDefinition + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCheckDefinition + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCheckDefinition + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCheckDefinition + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCheckDefinition = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCheckDefinition = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCheckDefinition = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/check_definition.proto b/vendor/code.cloudfoundry.org/bbs/models/check_definition.proto new file mode 100644 index 00000000..e38338d5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/check_definition.proto @@ -0,0 +1,33 @@ + +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message CheckDefinition { + repeated Check checks = 1; + string log_source = 2 [(gogoproto.jsontag) = "log_source"]; + repeated Check readiness_checks = 3; +} + +message Check { + // oneof is hard to use right now, instead we can do this check in validation + // oneof check { + TCPCheck tcp_check = 1; + HTTPCheck http_check = 2; + // } +} + +message TCPCheck { + uint32 port = 1 [(gogoproto.jsontag) = "port"]; + uint64 connect_timeout_ms = 2; + uint64 interval_ms = 3; +} + +message HTTPCheck { + uint32 port = 1 [(gogoproto.jsontag) = "port"]; + uint64 request_timeout_ms = 2; + string path = 3 
[(gogoproto.jsontag) = "path"]; + uint64 interval_ms = 4; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go new file mode 100644 index 00000000..9fd0ddfc --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.go @@ -0,0 +1,785 @@ +package models + +import ( + bytes "bytes" + "encoding/json" + "net/url" + "regexp" + "time" + + "code.cloudfoundry.org/bbs/format" +) + +const PreloadedRootFSScheme = "preloaded" +const PreloadedOCIRootFSScheme = "preloaded+layer" + +var processGuidPattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +type DesiredLRPChange struct { + Before *DesiredLRP + After *DesiredLRP +} + +type DesiredLRPFilter struct { + Domain string + ProcessGuids []string +} + +func PreloadedRootFS(stack string) string { + return (&url.URL{ + Scheme: PreloadedRootFSScheme, + Opaque: stack, + }).String() +} + +func NewDesiredLRP(schedInfo DesiredLRPSchedulingInfo, runInfo DesiredLRPRunInfo) DesiredLRP { + environmentVariables := make([]*EnvironmentVariable, len(runInfo.EnvironmentVariables)) + for i := range runInfo.EnvironmentVariables { + environmentVariables[i] = &runInfo.EnvironmentVariables[i] + } + + egressRules := make([]*SecurityGroupRule, len(runInfo.EgressRules)) + for i := range runInfo.EgressRules { + egressRules[i] = &runInfo.EgressRules[i] + } + + return DesiredLRP{ + ProcessGuid: schedInfo.ProcessGuid, + Domain: schedInfo.Domain, + LogGuid: schedInfo.LogGuid, + MemoryMb: schedInfo.MemoryMb, + DiskMb: schedInfo.DiskMb, + MaxPids: schedInfo.MaxPids, + RootFs: schedInfo.RootFs, + Instances: schedInfo.Instances, + Annotation: schedInfo.Annotation, + Routes: &schedInfo.Routes, + ModificationTag: &schedInfo.ModificationTag, + EnvironmentVariables: environmentVariables, + CachedDependencies: runInfo.CachedDependencies, + Setup: runInfo.Setup, + Action: runInfo.Action, + Monitor: runInfo.Monitor, + StartTimeoutMs: runInfo.StartTimeoutMs, + Privileged: runInfo.Privileged, + CpuWeight: runInfo.CpuWeight, + Ports: runInfo.Ports, + EgressRules: egressRules, + LogSource: runInfo.LogSource, + MetricsGuid: runInfo.MetricsGuid, + LegacyDownloadUser: runInfo.LegacyDownloadUser, + TrustedSystemCertificatesPath: runInfo.TrustedSystemCertificatesPath, + VolumeMounts: runInfo.VolumeMounts, + Network: runInfo.Network, + PlacementTags: schedInfo.PlacementTags, + CertificateProperties: runInfo.CertificateProperties, + ImageUsername: runInfo.ImageUsername, + ImagePassword: runInfo.ImagePassword, + CheckDefinition: runInfo.CheckDefinition, + ImageLayers: runInfo.ImageLayers, + MetricTags: runInfo.MetricTags, + Sidecars: runInfo.Sidecars, + LogRateLimit: runInfo.LogRateLimit, + } +} + +func (desiredLRP *DesiredLRP) AddRunInfo(runInfo DesiredLRPRunInfo) { + environmentVariables := make([]*EnvironmentVariable, len(runInfo.EnvironmentVariables)) + for i := range runInfo.EnvironmentVariables { + environmentVariables[i] = &runInfo.EnvironmentVariables[i] + } + + egressRules := make([]*SecurityGroupRule, len(runInfo.EgressRules)) + for i := range runInfo.EgressRules { + egressRules[i] = &runInfo.EgressRules[i] + } + + desiredLRP.EnvironmentVariables = environmentVariables + desiredLRP.CachedDependencies = runInfo.CachedDependencies + desiredLRP.Setup = runInfo.Setup + desiredLRP.Action = runInfo.Action + desiredLRP.Monitor = runInfo.Monitor + desiredLRP.StartTimeoutMs = runInfo.StartTimeoutMs + desiredLRP.Privileged = runInfo.Privileged + desiredLRP.CpuWeight = runInfo.CpuWeight + desiredLRP.Ports = runInfo.Ports 
+ desiredLRP.EgressRules = egressRules + desiredLRP.LogSource = runInfo.LogSource + desiredLRP.MetricsGuid = runInfo.MetricsGuid + desiredLRP.LegacyDownloadUser = runInfo.LegacyDownloadUser + desiredLRP.TrustedSystemCertificatesPath = runInfo.TrustedSystemCertificatesPath + desiredLRP.VolumeMounts = runInfo.VolumeMounts + desiredLRP.Network = runInfo.Network + desiredLRP.CheckDefinition = runInfo.CheckDefinition +} + +func (*DesiredLRP) Version() format.Version { + return format.V3 +} + +func (d *DesiredLRP) actionsFromCachedDependencies() []ActionInterface { + actions := make([]ActionInterface, len(d.CachedDependencies)) + for i := range d.CachedDependencies { + cacheDependency := d.CachedDependencies[i] + actions[i] = &DownloadAction{ + Artifact: cacheDependency.Name, + From: cacheDependency.From, + To: cacheDependency.To, + CacheKey: cacheDependency.CacheKey, + LogSource: cacheDependency.LogSource, + User: d.LegacyDownloadUser, + } + } + return actions +} + +func newDesiredLRPWithCachedDependenciesAsSetupActions(d *DesiredLRP) *DesiredLRP { + d = d.Copy() + if len(d.CachedDependencies) > 0 { + + cachedDownloads := Parallel(d.actionsFromCachedDependencies()...) + + if d.Setup != nil { + d.Setup = WrapAction(Serial(cachedDownloads, UnwrapAction(d.Setup))) + } else { + d.Setup = WrapAction(Serial(cachedDownloads)) + } + d.CachedDependencies = nil + } + + return d +} + +func downgradeDesiredLRPV2ToV1(d *DesiredLRP) *DesiredLRP { + return d +} + +func downgradeDesiredLRPV1ToV0(d *DesiredLRP) *DesiredLRP { + d.Action = d.Action.SetDeprecatedTimeoutNs() + d.Setup = d.Setup.SetDeprecatedTimeoutNs() + d.Monitor = d.Monitor.SetDeprecatedTimeoutNs() + d.DeprecatedStartTimeoutS = uint32(d.StartTimeoutMs) / 1000 + return newDesiredLRPWithCachedDependenciesAsSetupActions(d) +} + +func downgradeDesiredLRPV3ToV2(d *DesiredLRP) *DesiredLRP { + layers := ImageLayers(d.ImageLayers) + + d.CachedDependencies = append(layers.ToCachedDependencies(), d.CachedDependencies...) 
+ d.Setup = layers.ToDownloadActions(d.LegacyDownloadUser, d.Setup) + d.ImageLayers = nil + + return d +} + +var downgrades = []func(*DesiredLRP) *DesiredLRP{ + downgradeDesiredLRPV1ToV0, + downgradeDesiredLRPV2ToV1, + downgradeDesiredLRPV3ToV2, +} + +func (d *DesiredLRP) VersionDownTo(v format.Version) *DesiredLRP { + versionedLRP := d.Copy() + + for version := d.Version(); version > v; version-- { + versionedLRP = downgrades[version-1](versionedLRP) + } + + return versionedLRP +} + +func (d *DesiredLRP) PopulateMetricsGuid() *DesiredLRP { + sourceId, sourceIDIsSet := d.MetricTags["source_id"] + switch { + case sourceIDIsSet && d.MetricsGuid == "": + d.MetricsGuid = sourceId.Static + case !sourceIDIsSet && d.MetricsGuid != "": + if d.MetricTags == nil { + d.MetricTags = make(map[string]*MetricTagValue) + } + d.MetricTags["source_id"] = &MetricTagValue{ + Static: d.MetricsGuid, + } + } + return d +} + +func (d *DesiredLRP) DesiredLRPKey() DesiredLRPKey { + return NewDesiredLRPKey(d.ProcessGuid, d.Domain, d.LogGuid) +} + +func (d *DesiredLRP) DesiredLRPResource() DesiredLRPResource { + return NewDesiredLRPResource(d.MemoryMb, d.DiskMb, d.MaxPids, d.RootFs) +} + +func (d *DesiredLRP) DesiredLRPSchedulingInfo() DesiredLRPSchedulingInfo { + var routes Routes + if d.Routes != nil { + routes = *d.Routes + } + var modificationTag ModificationTag + if d.ModificationTag != nil { + modificationTag = *d.ModificationTag + } + + var volumePlacement VolumePlacement + volumePlacement.DriverNames = []string{} + for _, mount := range d.VolumeMounts { + volumePlacement.DriverNames = append(volumePlacement.DriverNames, mount.Driver) + } + + return NewDesiredLRPSchedulingInfo( + d.DesiredLRPKey(), + d.Annotation, + d.Instances, + d.DesiredLRPResource(), + routes, + modificationTag, + &volumePlacement, + d.PlacementTags, + ) +} + +func (d *DesiredLRP) DesiredLRPRoutingInfo() DesiredLRP { + var routes Routes + if d.Routes != nil { + routes = *d.Routes + } + + var modificationTag ModificationTag + if d.ModificationTag != nil { + modificationTag = *d.ModificationTag + } + + return NewDesiredLRPRoutingInfo( + d.DesiredLRPKey(), + d.Instances, + &routes, + &modificationTag, + d.MetricTags, + ) +} + +func (d *DesiredLRP) DesiredLRPRunInfo(createdAt time.Time) DesiredLRPRunInfo { + environmentVariables := make([]EnvironmentVariable, len(d.EnvironmentVariables)) + for i := range d.EnvironmentVariables { + environmentVariables[i] = *d.EnvironmentVariables[i] + } + + egressRules := make([]SecurityGroupRule, len(d.EgressRules)) + for i := range d.EgressRules { + egressRules[i] = *d.EgressRules[i] + } + + return NewDesiredLRPRunInfo( + d.DesiredLRPKey(), + createdAt, + environmentVariables, + d.CachedDependencies, + d.Setup, + d.Action, + d.Monitor, + d.StartTimeoutMs, + d.Privileged, + d.CpuWeight, + d.Ports, + egressRules, + d.LogSource, + d.MetricsGuid, + d.LegacyDownloadUser, + d.TrustedSystemCertificatesPath, + d.VolumeMounts, + d.Network, + d.CertificateProperties, + d.ImageUsername, + d.ImagePassword, + d.CheckDefinition, + d.ImageLayers, + d.MetricTags, + d.Sidecars, + d.LogRateLimit, + ) +} + +func (d *DesiredLRP) Copy() *DesiredLRP { + newDesired := *d + return &newDesired +} + +func (desired DesiredLRP) Validate() error { + var validationError ValidationError + + if desired.GetDomain() == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if desired.GetRootFs() == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } + + rootFSURL, err := 
url.Parse(desired.GetRootFs()) + if err != nil || rootFSURL.Scheme == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } + + if desired.GetInstances() < 0 { + validationError = validationError.Append(ErrInvalidField{"instances"}) + } + + if desired.GetMemoryMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if desired.GetDiskMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if limit := desired.GetLogRateLimit(); limit != nil { + if limit.GetBytesPerSecond() < -1 { + validationError = validationError.Append(ErrInvalidField{"log_rate_limit_bytes_per_second"}) + } + } + + if len(desired.GetAnnotation()) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + if desired.GetMaxPids() < 0 { + validationError = validationError.Append(ErrInvalidField{"max_pids"}) + } + + totalRoutesLength := 0 + if desired.Routes != nil { + for _, value := range *desired.Routes { + totalRoutesLength += len(*value) + if totalRoutesLength > maximumRouteLength { + validationError = validationError.Append(ErrInvalidField{"routes"}) + break + } + } + } + + runInfoErrors := desired.DesiredLRPRunInfo(time.Now()).Validate() + if runInfoErrors != nil { + validationError = validationError.Append(runInfoErrors) + } + + return validationError.ToError() +} + +func (desired *DesiredLRPUpdate) Validate() error { + var validationError ValidationError + + if desired.GetInstances() < 0 { + validationError = validationError.Append(ErrInvalidField{"instances"}) + } + + if len(desired.GetAnnotation()) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + totalRoutesLength := 0 + if desired.Routes != nil { + for _, value := range *desired.Routes { + totalRoutesLength += len(*value) + if totalRoutesLength > maximumRouteLength { + validationError = validationError.Append(ErrInvalidField{"routes"}) + break + } + } + } + + err := validateMetricTags(desired.MetricTags, "") + if err != nil { + validationError = validationError.Append(ErrInvalidField{"metric_tags"}) + validationError = validationError.Append(err) + } + + return validationError.ToError() +} + +func (desired *DesiredLRPUpdate) SetInstances(instances int32) { + desired.OptionalInstances = &DesiredLRPUpdate_Instances{ + Instances: instances, + } +} + +func (desired DesiredLRPUpdate) InstancesExists() bool { + _, ok := desired.GetOptionalInstances().(*DesiredLRPUpdate_Instances) + return ok +} + +func (desired *DesiredLRPUpdate) SetAnnotation(annotation string) { + desired.OptionalAnnotation = &DesiredLRPUpdate_Annotation{ + Annotation: annotation, + } +} + +func (desired DesiredLRPUpdate) AnnotationExists() bool { + _, ok := desired.GetOptionalAnnotation().(*DesiredLRPUpdate_Annotation) + return ok +} + +func (desired DesiredLRPUpdate) IsRoutesGroupUpdated(routes *Routes, routerGroup string) bool { + if desired.Routes == nil { + return false + } + + if routes == nil { + return true + } + + desiredRoutes, desiredRoutesPresent := (*desired.Routes)[routerGroup] + requestRoutes, requestRoutesPresent := (*routes)[routerGroup] + if desiredRoutesPresent != requestRoutesPresent { + return true + } + + if desiredRoutesPresent && requestRoutesPresent { + return !bytes.Equal(*desiredRoutes, *requestRoutes) + } + + return true +} + +func (desired DesiredLRPUpdate) IsMetricTagsUpdated(existingTags map[string]*MetricTagValue) bool { + if desired.MetricTags == nil { + return 
false + } + if len(desired.MetricTags) != len(existingTags) { + return true + } + for k, v := range existingTags { + updateTag, ok := desired.MetricTags[k] + if !ok { + return true + } + if updateTag.Static != v.Static || updateTag.Dynamic != v.Dynamic { + return true + } + } + return false +} + +type internalDesiredLRPUpdate struct { + Instances *int32 `json:"instances,omitempty"` + Routes *Routes `json:"routes,omitempty"` + Annotation *string `json:"annotation,omitempty"` + MetricTags map[string]*MetricTagValue `json:"metric_tags,omitempty"` +} + +func (desired *DesiredLRPUpdate) UnmarshalJSON(data []byte) error { + var update internalDesiredLRPUpdate + if err := json.Unmarshal(data, &update); err != nil { + return err + } + + if update.Instances != nil { + desired.SetInstances(*update.Instances) + } + desired.Routes = update.Routes + if update.Annotation != nil { + desired.SetAnnotation(*update.Annotation) + } + desired.MetricTags = update.MetricTags + + return nil +} + +func (desired DesiredLRPUpdate) MarshalJSON() ([]byte, error) { + var update internalDesiredLRPUpdate + if desired.InstancesExists() { + i := desired.GetInstances() + update.Instances = &i + } + update.Routes = desired.Routes + if desired.AnnotationExists() { + a := desired.GetAnnotation() + update.Annotation = &a + } + update.MetricTags = desired.MetricTags + return json.Marshal(update) +} + +func NewDesiredLRPKey(processGuid, domain, logGuid string) DesiredLRPKey { + return DesiredLRPKey{ + ProcessGuid: processGuid, + Domain: domain, + LogGuid: logGuid, + } +} + +func (key DesiredLRPKey) Validate() error { + var validationError ValidationError + if key.GetDomain() == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if !processGuidPattern.MatchString(key.GetProcessGuid()) { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + return validationError.ToError() +} + +func NewDesiredLRPSchedulingInfo( + key DesiredLRPKey, + annotation string, + instances int32, + resource DesiredLRPResource, + routes Routes, + modTag ModificationTag, + volumePlacement *VolumePlacement, + placementTags []string, +) DesiredLRPSchedulingInfo { + return DesiredLRPSchedulingInfo{ + DesiredLRPKey: key, + Annotation: annotation, + Instances: instances, + DesiredLRPResource: resource, + Routes: routes, + ModificationTag: modTag, + VolumePlacement: volumePlacement, + PlacementTags: placementTags, + } +} + +func NewDesiredLRPRoutingInfo( + key DesiredLRPKey, + instances int32, + routes *Routes, + modTag *ModificationTag, + metrTags map[string]*MetricTagValue, +) DesiredLRP { + return DesiredLRP{ + ProcessGuid: key.ProcessGuid, + Domain: key.Domain, + LogGuid: key.LogGuid, + Instances: instances, + Routes: routes, + ModificationTag: modTag, + MetricTags: metrTags, + } +} + +func (s *DesiredLRPSchedulingInfo) ApplyUpdate(update *DesiredLRPUpdate) { + if update.InstancesExists() { + s.Instances = update.GetInstances() + } + if update.Routes != nil { + s.Routes = *update.Routes + } + if update.AnnotationExists() { + s.Annotation = update.GetAnnotation() + } + s.ModificationTag.Increment() +} + +func (*DesiredLRPSchedulingInfo) Version() format.Version { + return format.V0 +} + +func (s DesiredLRPSchedulingInfo) Validate() error { + var validationError ValidationError + + validationError = validationError.Check(s.DesiredLRPKey, s.DesiredLRPResource, s.Routes) + + if s.GetInstances() < 0 { + validationError = validationError.Append(ErrInvalidField{"instances"}) + } + + if 
len(s.GetAnnotation()) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + return validationError.ToError() +} + +func NewDesiredLRPResource(memoryMb, diskMb, maxPids int32, rootFs string) DesiredLRPResource { + return DesiredLRPResource{ + MemoryMb: memoryMb, + DiskMb: diskMb, + MaxPids: maxPids, + RootFs: rootFs, + } +} + +func (resource DesiredLRPResource) Validate() error { + var validationError ValidationError + + rootFSURL, err := url.Parse(resource.GetRootFs()) + if err != nil || rootFSURL.Scheme == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } + + if resource.GetMemoryMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if resource.GetDiskMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if resource.GetMaxPids() < 0 { + validationError = validationError.Append(ErrInvalidField{"max_pids"}) + } + + return validationError.ToError() +} + +func NewDesiredLRPRunInfo( + key DesiredLRPKey, + createdAt time.Time, + envVars []EnvironmentVariable, + cacheDeps []*CachedDependency, + setup, + action, + monitor *Action, + startTimeoutMs int64, + privileged bool, + cpuWeight uint32, + ports []uint32, + egressRules []SecurityGroupRule, + logSource, + metricsGuid string, + legacyDownloadUser string, + trustedSystemCertificatesPath string, + volumeMounts []*VolumeMount, + network *Network, + certificateProperties *CertificateProperties, + imageUsername, imagePassword string, + checkDefinition *CheckDefinition, + imageLayers []*ImageLayer, + metricTags map[string]*MetricTagValue, + sidecars []*Sidecar, + logRateLimit *LogRateLimit, +) DesiredLRPRunInfo { + return DesiredLRPRunInfo{ + DesiredLRPKey: key, + CreatedAt: createdAt.UnixNano(), + EnvironmentVariables: envVars, + CachedDependencies: cacheDeps, + Setup: setup, + Action: action, + Monitor: monitor, + StartTimeoutMs: startTimeoutMs, + Privileged: privileged, + CpuWeight: cpuWeight, + Ports: ports, + EgressRules: egressRules, + LogSource: logSource, + MetricsGuid: metricsGuid, + LegacyDownloadUser: legacyDownloadUser, + TrustedSystemCertificatesPath: trustedSystemCertificatesPath, + VolumeMounts: volumeMounts, + Network: network, + CertificateProperties: certificateProperties, + ImageUsername: imageUsername, + ImagePassword: imagePassword, + CheckDefinition: checkDefinition, + ImageLayers: imageLayers, + MetricTags: metricTags, + Sidecars: sidecars, + LogRateLimit: logRateLimit, + } +} + +func (runInfo DesiredLRPRunInfo) Validate() error { + var validationError ValidationError + + validationError = validationError.Check(runInfo.DesiredLRPKey) + + if runInfo.Setup != nil { + if err := runInfo.Setup.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"setup"}) + validationError = validationError.Append(err) + } + } + + if runInfo.Action == nil { + validationError = validationError.Append(ErrInvalidActionType) + } else if err := runInfo.Action.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + validationError = validationError.Append(err) + } + + if runInfo.Monitor != nil { + if err := runInfo.Monitor.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"monitor"}) + validationError = validationError.Append(err) + } + } + + for _, envVar := range runInfo.EnvironmentVariables { + validationError = validationError.Check(envVar) + } + + for _, rule := range runInfo.EgressRules 
{ + err := rule.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"egress_rules"}) + validationError = validationError.Append(err) + } + } + + err := validateCachedDependencies(runInfo.CachedDependencies) + if err != nil { + validationError = validationError.Append(err) + } + + err = validateImageLayers(runInfo.ImageLayers, runInfo.LegacyDownloadUser) + if err != nil { + validationError = validationError.Append(err) + } + + if runInfo.MetricTags == nil { + validationError = validationError.Append(ErrInvalidField{"metric_tags"}) + } + + err = validateMetricTags(runInfo.MetricTags, runInfo.GetMetricsGuid()) + if err != nil { + validationError = validationError.Append(ErrInvalidField{"metric_tags"}) + validationError = validationError.Append(err) + } + + err = validateSidecars(runInfo.Sidecars) + if err != nil { + validationError = validationError.Append(ErrInvalidField{"sidecars"}) + validationError = validationError.Append(err) + } + + for _, mount := range runInfo.VolumeMounts { + validationError = validationError.Check(mount) + } + + if runInfo.ImageUsername == "" && runInfo.ImagePassword != "" { + validationError = validationError.Append(ErrInvalidField{"image_username"}) + } + + if runInfo.ImageUsername != "" && runInfo.ImagePassword == "" { + validationError = validationError.Append(ErrInvalidField{"image_password"}) + } + + if runInfo.CheckDefinition != nil { + if err := runInfo.CheckDefinition.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"check_definition"}) + validationError = validationError.Append(err) + } + } + + if limit := runInfo.LogRateLimit; limit != nil { + if limit.BytesPerSecond < -1 { + validationError = validationError.Append(ErrInvalidField{"log_rate_limit"}) + } + } + + return validationError.ToError() +} + +func (*CertificateProperties) Version() format.Version { + return format.V0 +} + +func (CertificateProperties) Validate() error { + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go new file mode 100644 index 00000000..a1c73745 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.pb.go @@ -0,0 +1,7240 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: desired_lrp.proto + +package models + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DesiredLRPSchedulingInfo struct { + DesiredLRPKey `protobuf:"bytes,1,opt,name=desired_lrp_key,json=desiredLrpKey,proto3,embedded=desired_lrp_key" json:""` + Annotation string `protobuf:"bytes,2,opt,name=annotation,proto3" json:"annotation"` + Instances int32 `protobuf:"varint,3,opt,name=instances,proto3" json:"instances"` + DesiredLRPResource `protobuf:"bytes,4,opt,name=desired_lrp_resource,json=desiredLrpResource,proto3,embedded=desired_lrp_resource" json:""` + Routes Routes `protobuf:"bytes,5,opt,name=routes,proto3,customtype=Routes" json:"routes"` + ModificationTag `protobuf:"bytes,6,opt,name=modification_tag,json=modificationTag,proto3,embedded=modification_tag" json:""` + VolumePlacement *VolumePlacement `protobuf:"bytes,7,opt,name=volume_placement,json=volumePlacement,proto3" json:"volume_placement,omitempty"` + PlacementTags []string `protobuf:"bytes,8,rep,name=PlacementTags,proto3" json:"placement_tags,omitempty"` +} + +func (m *DesiredLRPSchedulingInfo) Reset() { *m = DesiredLRPSchedulingInfo{} } +func (*DesiredLRPSchedulingInfo) ProtoMessage() {} +func (*DesiredLRPSchedulingInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{0} +} +func (m *DesiredLRPSchedulingInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPSchedulingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPSchedulingInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPSchedulingInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPSchedulingInfo.Merge(m, src) +} +func (m *DesiredLRPSchedulingInfo) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPSchedulingInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPSchedulingInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPSchedulingInfo proto.InternalMessageInfo + +func (m *DesiredLRPSchedulingInfo) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *DesiredLRPSchedulingInfo) GetInstances() int32 { + if m != nil { + return m.Instances + } + return 0 +} + +func (m *DesiredLRPSchedulingInfo) GetVolumePlacement() *VolumePlacement { + if m != nil { + return m.VolumePlacement + } + return nil +} + +func (m *DesiredLRPSchedulingInfo) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +type DesiredLRPRunInfo struct { + DesiredLRPKey `protobuf:"bytes,1,opt,name=desired_lrp_key,json=desiredLrpKey,proto3,embedded=desired_lrp_key" json:""` + EnvironmentVariables []EnvironmentVariable `protobuf:"bytes,2,rep,name=environment_variables,json=environmentVariables,proto3" json:"env"` + Setup *Action `protobuf:"bytes,3,opt,name=setup,proto3" json:"setup,omitempty"` + Action *Action `protobuf:"bytes,4,opt,name=action,proto3" json:"action,omitempty"` + Monitor *Action `protobuf:"bytes,5,opt,name=monitor,proto3" json:"monitor,omitempty"` + DeprecatedStartTimeoutS uint32 `protobuf:"varint,6,opt,name=deprecated_start_timeout_s,json=deprecatedStartTimeoutS,proto3" json:"start_timeout,omitempty"` // Deprecated: Do not use. 
+ Privileged bool `protobuf:"varint,7,opt,name=privileged,proto3" json:"privileged"` + CpuWeight uint32 `protobuf:"varint,8,opt,name=cpu_weight,json=cpuWeight,proto3" json:"cpu_weight"` + Ports []uint32 `protobuf:"varint,9,rep,name=ports,proto3" json:"ports,omitempty"` + EgressRules []SecurityGroupRule `protobuf:"bytes,10,rep,name=egress_rules,json=egressRules,proto3" json:"egress_rules"` + LogSource string `protobuf:"bytes,11,opt,name=log_source,json=logSource,proto3" json:"log_source"` + MetricsGuid string `protobuf:"bytes,12,opt,name=metrics_guid,json=metricsGuid,proto3" json:"metrics_guid"` // Deprecated: Do not use. + CreatedAt int64 `protobuf:"varint,13,opt,name=created_at,json=createdAt,proto3" json:"created_at"` + CachedDependencies []*CachedDependency `protobuf:"bytes,14,rep,name=cached_dependencies,json=cachedDependencies,proto3" json:"cached_dependencies,omitempty"` + LegacyDownloadUser string `protobuf:"bytes,15,opt,name=legacy_download_user,json=legacyDownloadUser,proto3" json:"legacy_download_user,omitempty"` // Deprecated: Do not use. + TrustedSystemCertificatesPath string `protobuf:"bytes,16,opt,name=trusted_system_certificates_path,json=trustedSystemCertificatesPath,proto3" json:"trusted_system_certificates_path,omitempty"` + VolumeMounts []*VolumeMount `protobuf:"bytes,17,rep,name=volume_mounts,json=volumeMounts,proto3" json:"volume_mounts,omitempty"` + Network *Network `protobuf:"bytes,18,opt,name=network,proto3" json:"network,omitempty"` + StartTimeoutMs int64 `protobuf:"varint,19,opt,name=start_timeout_ms,json=startTimeoutMs,proto3" json:"start_timeout_ms"` + CertificateProperties *CertificateProperties `protobuf:"bytes,20,opt,name=certificate_properties,json=certificateProperties,proto3" json:"certificate_properties,omitempty"` + ImageUsername string `protobuf:"bytes,21,opt,name=image_username,json=imageUsername,proto3" json:"image_username,omitempty"` + ImagePassword string `protobuf:"bytes,22,opt,name=image_password,json=imagePassword,proto3" json:"image_password,omitempty"` + CheckDefinition *CheckDefinition `protobuf:"bytes,23,opt,name=check_definition,json=checkDefinition,proto3" json:"check_definition,omitempty"` + ImageLayers []*ImageLayer `protobuf:"bytes,24,rep,name=image_layers,json=imageLayers,proto3" json:"image_layers,omitempty"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,25,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Sidecars []*Sidecar `protobuf:"bytes,26,rep,name=sidecars,proto3" json:"sidecars,omitempty"` + LogRateLimit *LogRateLimit `protobuf:"bytes,27,opt,name=log_rate_limit,json=logRateLimit,proto3" json:"log_rate_limit,omitempty"` +} + +func (m *DesiredLRPRunInfo) Reset() { *m = DesiredLRPRunInfo{} } +func (*DesiredLRPRunInfo) ProtoMessage() {} +func (*DesiredLRPRunInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{1} +} +func (m *DesiredLRPRunInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPRunInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPRunInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPRunInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPRunInfo.Merge(m, src) +} +func (m *DesiredLRPRunInfo) XXX_Size() int { + return 
m.Size() +} +func (m *DesiredLRPRunInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPRunInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPRunInfo proto.InternalMessageInfo + +func (m *DesiredLRPRunInfo) GetEnvironmentVariables() []EnvironmentVariable { + if m != nil { + return m.EnvironmentVariables + } + return nil +} + +func (m *DesiredLRPRunInfo) GetSetup() *Action { + if m != nil { + return m.Setup + } + return nil +} + +func (m *DesiredLRPRunInfo) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *DesiredLRPRunInfo) GetMonitor() *Action { + if m != nil { + return m.Monitor + } + return nil +} + +// Deprecated: Do not use. +func (m *DesiredLRPRunInfo) GetDeprecatedStartTimeoutS() uint32 { + if m != nil { + return m.DeprecatedStartTimeoutS + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *DesiredLRPRunInfo) GetCpuWeight() uint32 { + if m != nil { + return m.CpuWeight + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetPorts() []uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *DesiredLRPRunInfo) GetEgressRules() []SecurityGroupRule { + if m != nil { + return m.EgressRules + } + return nil +} + +func (m *DesiredLRPRunInfo) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +// Deprecated: Do not use. +func (m *DesiredLRPRunInfo) GetMetricsGuid() string { + if m != nil { + return m.MetricsGuid + } + return "" +} + +func (m *DesiredLRPRunInfo) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetCachedDependencies() []*CachedDependency { + if m != nil { + return m.CachedDependencies + } + return nil +} + +// Deprecated: Do not use. 
+func (m *DesiredLRPRunInfo) GetLegacyDownloadUser() string { + if m != nil { + return m.LegacyDownloadUser + } + return "" +} + +func (m *DesiredLRPRunInfo) GetTrustedSystemCertificatesPath() string { + if m != nil { + return m.TrustedSystemCertificatesPath + } + return "" +} + +func (m *DesiredLRPRunInfo) GetVolumeMounts() []*VolumeMount { + if m != nil { + return m.VolumeMounts + } + return nil +} + +func (m *DesiredLRPRunInfo) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *DesiredLRPRunInfo) GetStartTimeoutMs() int64 { + if m != nil { + return m.StartTimeoutMs + } + return 0 +} + +func (m *DesiredLRPRunInfo) GetCertificateProperties() *CertificateProperties { + if m != nil { + return m.CertificateProperties + } + return nil +} + +func (m *DesiredLRPRunInfo) GetImageUsername() string { + if m != nil { + return m.ImageUsername + } + return "" +} + +func (m *DesiredLRPRunInfo) GetImagePassword() string { + if m != nil { + return m.ImagePassword + } + return "" +} + +func (m *DesiredLRPRunInfo) GetCheckDefinition() *CheckDefinition { + if m != nil { + return m.CheckDefinition + } + return nil +} + +func (m *DesiredLRPRunInfo) GetImageLayers() []*ImageLayer { + if m != nil { + return m.ImageLayers + } + return nil +} + +func (m *DesiredLRPRunInfo) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *DesiredLRPRunInfo) GetSidecars() []*Sidecar { + if m != nil { + return m.Sidecars + } + return nil +} + +func (m *DesiredLRPRunInfo) GetLogRateLimit() *LogRateLimit { + if m != nil { + return m.LogRateLimit + } + return nil +} + +// helper message for marshalling routes +type ProtoRoutes struct { + Routes map[string][]byte `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ProtoRoutes) Reset() { *m = ProtoRoutes{} } +func (*ProtoRoutes) ProtoMessage() {} +func (*ProtoRoutes) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{2} +} +func (m *ProtoRoutes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProtoRoutes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProtoRoutes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProtoRoutes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtoRoutes.Merge(m, src) +} +func (m *ProtoRoutes) XXX_Size() int { + return m.Size() +} +func (m *ProtoRoutes) XXX_DiscardUnknown() { + xxx_messageInfo_ProtoRoutes.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtoRoutes proto.InternalMessageInfo + +func (m *ProtoRoutes) GetRoutes() map[string][]byte { + if m != nil { + return m.Routes + } + return nil +} + +type DesiredLRPUpdate struct { + // Types that are valid to be assigned to OptionalInstances: + // + // *DesiredLRPUpdate_Instances + OptionalInstances isDesiredLRPUpdate_OptionalInstances `protobuf_oneof:"optional_instances"` + Routes *Routes `protobuf:"bytes,2,opt,name=routes,proto3,customtype=Routes" json:"routes,omitempty"` + // Types that are valid to be assigned to OptionalAnnotation: + // + // *DesiredLRPUpdate_Annotation + OptionalAnnotation isDesiredLRPUpdate_OptionalAnnotation `protobuf_oneof:"optional_annotation"` + MetricTags map[string]*MetricTagValue 
`protobuf:"bytes,4,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *DesiredLRPUpdate) Reset() { *m = DesiredLRPUpdate{} } +func (*DesiredLRPUpdate) ProtoMessage() {} +func (*DesiredLRPUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{3} +} +func (m *DesiredLRPUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPUpdate.Merge(m, src) +} +func (m *DesiredLRPUpdate) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPUpdate proto.InternalMessageInfo + +type isDesiredLRPUpdate_OptionalInstances interface { + isDesiredLRPUpdate_OptionalInstances() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} +type isDesiredLRPUpdate_OptionalAnnotation interface { + isDesiredLRPUpdate_OptionalAnnotation() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type DesiredLRPUpdate_Instances struct { + Instances int32 `protobuf:"varint,1,opt,name=instances,proto3,oneof" json:"instances,omitempty"` +} +type DesiredLRPUpdate_Annotation struct { + Annotation string `protobuf:"bytes,3,opt,name=annotation,proto3,oneof" json:"annotation,omitempty"` +} + +func (*DesiredLRPUpdate_Instances) isDesiredLRPUpdate_OptionalInstances() {} +func (*DesiredLRPUpdate_Annotation) isDesiredLRPUpdate_OptionalAnnotation() {} + +func (m *DesiredLRPUpdate) GetOptionalInstances() isDesiredLRPUpdate_OptionalInstances { + if m != nil { + return m.OptionalInstances + } + return nil +} +func (m *DesiredLRPUpdate) GetOptionalAnnotation() isDesiredLRPUpdate_OptionalAnnotation { + if m != nil { + return m.OptionalAnnotation + } + return nil +} + +func (m *DesiredLRPUpdate) GetInstances() int32 { + if x, ok := m.GetOptionalInstances().(*DesiredLRPUpdate_Instances); ok { + return x.Instances + } + return 0 +} + +func (m *DesiredLRPUpdate) GetAnnotation() string { + if x, ok := m.GetOptionalAnnotation().(*DesiredLRPUpdate_Annotation); ok { + return x.Annotation + } + return "" +} + +func (m *DesiredLRPUpdate) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*DesiredLRPUpdate) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*DesiredLRPUpdate_Instances)(nil), + (*DesiredLRPUpdate_Annotation)(nil), + } +} + +type DesiredLRPKey struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain"` + LogGuid string `protobuf:"bytes,3,opt,name=log_guid,json=logGuid,proto3" json:"log_guid"` +} + +func (m *DesiredLRPKey) Reset() { *m = DesiredLRPKey{} } +func (*DesiredLRPKey) ProtoMessage() {} +func (*DesiredLRPKey) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{4} +} +func (m *DesiredLRPKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPKey.Merge(m, src) +} +func (m *DesiredLRPKey) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPKey) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPKey.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPKey proto.InternalMessageInfo + +func (m *DesiredLRPKey) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *DesiredLRPKey) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *DesiredLRPKey) GetLogGuid() string { + if m != nil { + return m.LogGuid + } + return "" +} + +type DesiredLRPResource struct { + MemoryMb int32 `protobuf:"varint,1,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + DiskMb int32 `protobuf:"varint,2,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + RootFs string `protobuf:"bytes,3,opt,name=root_fs,json=rootFs,proto3" json:"rootfs"` + MaxPids int32 `protobuf:"varint,4,opt,name=max_pids,json=maxPids,proto3" json:"max_pids"` +} + +func (m *DesiredLRPResource) Reset() { *m = DesiredLRPResource{} } +func (*DesiredLRPResource) ProtoMessage() {} +func (*DesiredLRPResource) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{5} +} +func (m *DesiredLRPResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPResource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPResource.Merge(m, src) +} +func (m *DesiredLRPResource) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPResource) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPResource.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPResource proto.InternalMessageInfo + +func (m *DesiredLRPResource) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *DesiredLRPResource) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *DesiredLRPResource) GetRootFs() string { + if m != nil { + return m.RootFs + } + return "" +} + +func (m *DesiredLRPResource) GetMaxPids() int32 { + if m != nil { + return m.MaxPids + } + return 0 +} + 
+type DesiredLRP struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain"` + RootFs string `protobuf:"bytes,3,opt,name=root_fs,json=rootFs,proto3" json:"rootfs"` + Instances int32 `protobuf:"varint,4,opt,name=instances,proto3" json:"instances"` + EnvironmentVariables []*EnvironmentVariable `protobuf:"bytes,5,rep,name=environment_variables,json=environmentVariables,proto3" json:"env"` + Setup *Action `protobuf:"bytes,6,opt,name=setup,proto3" json:"setup,omitempty"` + Action *Action `protobuf:"bytes,7,opt,name=action,proto3" json:"action,omitempty"` + StartTimeoutMs int64 `protobuf:"varint,27,opt,name=start_timeout_ms,json=startTimeoutMs,proto3" json:"start_timeout_ms"` + DeprecatedStartTimeoutS uint32 `protobuf:"varint,8,opt,name=deprecated_start_timeout_s,json=deprecatedStartTimeoutS,proto3" json:"deprecated_timeout_ns,omitempty"` // Deprecated: Do not use. + Monitor *Action `protobuf:"bytes,9,opt,name=monitor,proto3" json:"monitor,omitempty"` + DiskMb int32 `protobuf:"varint,10,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + MemoryMb int32 `protobuf:"varint,11,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + CpuWeight uint32 `protobuf:"varint,12,opt,name=cpu_weight,json=cpuWeight,proto3" json:"cpu_weight"` + Privileged bool `protobuf:"varint,13,opt,name=privileged,proto3" json:"privileged"` + Ports []uint32 `protobuf:"varint,14,rep,name=ports,proto3" json:"ports,omitempty"` + Routes *Routes `protobuf:"bytes,15,opt,name=routes,proto3,customtype=Routes" json:"routes,omitempty"` + LogSource string `protobuf:"bytes,16,opt,name=log_source,json=logSource,proto3" json:"log_source"` + LogGuid string `protobuf:"bytes,17,opt,name=log_guid,json=logGuid,proto3" json:"log_guid"` + MetricsGuid string `protobuf:"bytes,18,opt,name=metrics_guid,json=metricsGuid,proto3" json:"metrics_guid"` // Deprecated: Do not use. + Annotation string `protobuf:"bytes,19,opt,name=annotation,proto3" json:"annotation"` + EgressRules []*SecurityGroupRule `protobuf:"bytes,20,rep,name=egress_rules,json=egressRules,proto3" json:"egress_rules,omitempty"` + ModificationTag *ModificationTag `protobuf:"bytes,21,opt,name=modification_tag,json=modificationTag,proto3" json:"modification_tag,omitempty"` + CachedDependencies []*CachedDependency `protobuf:"bytes,22,rep,name=cached_dependencies,json=cachedDependencies,proto3" json:"cached_dependencies,omitempty"` + LegacyDownloadUser string `protobuf:"bytes,23,opt,name=legacy_download_user,json=legacyDownloadUser,proto3" json:"legacy_download_user,omitempty"` // Deprecated: Do not use. 
+ TrustedSystemCertificatesPath string `protobuf:"bytes,24,opt,name=trusted_system_certificates_path,json=trustedSystemCertificatesPath,proto3" json:"trusted_system_certificates_path,omitempty"` + VolumeMounts []*VolumeMount `protobuf:"bytes,25,rep,name=volume_mounts,json=volumeMounts,proto3" json:"volume_mounts,omitempty"` + Network *Network `protobuf:"bytes,26,opt,name=network,proto3" json:"network,omitempty"` + PlacementTags []string `protobuf:"bytes,28,rep,name=PlacementTags,proto3" json:"placement_tags,omitempty"` + MaxPids int32 `protobuf:"varint,29,opt,name=max_pids,json=maxPids,proto3" json:"max_pids"` + CertificateProperties *CertificateProperties `protobuf:"bytes,30,opt,name=certificate_properties,json=certificateProperties,proto3" json:"certificate_properties,omitempty"` + ImageUsername string `protobuf:"bytes,31,opt,name=image_username,json=imageUsername,proto3" json:"image_username,omitempty"` + ImagePassword string `protobuf:"bytes,32,opt,name=image_password,json=imagePassword,proto3" json:"image_password,omitempty"` + CheckDefinition *CheckDefinition `protobuf:"bytes,33,opt,name=check_definition,json=checkDefinition,proto3" json:"check_definition,omitempty"` + ImageLayers []*ImageLayer `protobuf:"bytes,34,rep,name=image_layers,json=imageLayers,proto3" json:"image_layers,omitempty"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,35,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Sidecars []*Sidecar `protobuf:"bytes,36,rep,name=sidecars,proto3" json:"sidecars,omitempty"` + LogRateLimit *LogRateLimit `protobuf:"bytes,37,opt,name=log_rate_limit,json=logRateLimit,proto3" json:"log_rate_limit,omitempty"` +} + +func (m *DesiredLRP) Reset() { *m = DesiredLRP{} } +func (*DesiredLRP) ProtoMessage() {} +func (*DesiredLRP) Descriptor() ([]byte, []int) { + return fileDescriptor_f592e9299b63d68c, []int{6} +} +func (m *DesiredLRP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRP.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRP) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRP.Merge(m, src) +} +func (m *DesiredLRP) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRP) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRP.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRP proto.InternalMessageInfo + +func (m *DesiredLRP) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *DesiredLRP) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *DesiredLRP) GetRootFs() string { + if m != nil { + return m.RootFs + } + return "" +} + +func (m *DesiredLRP) GetInstances() int32 { + if m != nil { + return m.Instances + } + return 0 +} + +func (m *DesiredLRP) GetEnvironmentVariables() []*EnvironmentVariable { + if m != nil { + return m.EnvironmentVariables + } + return nil +} + +func (m *DesiredLRP) GetSetup() *Action { + if m != nil { + return m.Setup + } + return nil +} + +func (m *DesiredLRP) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *DesiredLRP) GetStartTimeoutMs() int64 { + if m != nil { + return m.StartTimeoutMs + } + return 0 +} + +// Deprecated: Do not use. 
+func (m *DesiredLRP) GetDeprecatedStartTimeoutS() uint32 { + if m != nil { + return m.DeprecatedStartTimeoutS + } + return 0 +} + +func (m *DesiredLRP) GetMonitor() *Action { + if m != nil { + return m.Monitor + } + return nil +} + +func (m *DesiredLRP) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *DesiredLRP) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *DesiredLRP) GetCpuWeight() uint32 { + if m != nil { + return m.CpuWeight + } + return 0 +} + +func (m *DesiredLRP) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *DesiredLRP) GetPorts() []uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *DesiredLRP) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *DesiredLRP) GetLogGuid() string { + if m != nil { + return m.LogGuid + } + return "" +} + +// Deprecated: Do not use. +func (m *DesiredLRP) GetMetricsGuid() string { + if m != nil { + return m.MetricsGuid + } + return "" +} + +func (m *DesiredLRP) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *DesiredLRP) GetEgressRules() []*SecurityGroupRule { + if m != nil { + return m.EgressRules + } + return nil +} + +func (m *DesiredLRP) GetModificationTag() *ModificationTag { + if m != nil { + return m.ModificationTag + } + return nil +} + +func (m *DesiredLRP) GetCachedDependencies() []*CachedDependency { + if m != nil { + return m.CachedDependencies + } + return nil +} + +// Deprecated: Do not use. +func (m *DesiredLRP) GetLegacyDownloadUser() string { + if m != nil { + return m.LegacyDownloadUser + } + return "" +} + +func (m *DesiredLRP) GetTrustedSystemCertificatesPath() string { + if m != nil { + return m.TrustedSystemCertificatesPath + } + return "" +} + +func (m *DesiredLRP) GetVolumeMounts() []*VolumeMount { + if m != nil { + return m.VolumeMounts + } + return nil +} + +func (m *DesiredLRP) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *DesiredLRP) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +func (m *DesiredLRP) GetMaxPids() int32 { + if m != nil { + return m.MaxPids + } + return 0 +} + +func (m *DesiredLRP) GetCertificateProperties() *CertificateProperties { + if m != nil { + return m.CertificateProperties + } + return nil +} + +func (m *DesiredLRP) GetImageUsername() string { + if m != nil { + return m.ImageUsername + } + return "" +} + +func (m *DesiredLRP) GetImagePassword() string { + if m != nil { + return m.ImagePassword + } + return "" +} + +func (m *DesiredLRP) GetCheckDefinition() *CheckDefinition { + if m != nil { + return m.CheckDefinition + } + return nil +} + +func (m *DesiredLRP) GetImageLayers() []*ImageLayer { + if m != nil { + return m.ImageLayers + } + return nil +} + +func (m *DesiredLRP) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *DesiredLRP) GetSidecars() []*Sidecar { + if m != nil { + return m.Sidecars + } + return nil +} + +func (m *DesiredLRP) GetLogRateLimit() *LogRateLimit { + if m != nil { + return m.LogRateLimit + } + return nil +} + +func init() { + proto.RegisterType((*DesiredLRPSchedulingInfo)(nil), "models.DesiredLRPSchedulingInfo") + proto.RegisterType((*DesiredLRPRunInfo)(nil), "models.DesiredLRPRunInfo") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.DesiredLRPRunInfo.MetricTagsEntry") + 
proto.RegisterType((*ProtoRoutes)(nil), "models.ProtoRoutes") + proto.RegisterMapType((map[string][]byte)(nil), "models.ProtoRoutes.RoutesEntry") + proto.RegisterType((*DesiredLRPUpdate)(nil), "models.DesiredLRPUpdate") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.DesiredLRPUpdate.MetricTagsEntry") + proto.RegisterType((*DesiredLRPKey)(nil), "models.DesiredLRPKey") + proto.RegisterType((*DesiredLRPResource)(nil), "models.DesiredLRPResource") + proto.RegisterType((*DesiredLRP)(nil), "models.DesiredLRP") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.DesiredLRP.MetricTagsEntry") +} + +func init() { proto.RegisterFile("desired_lrp.proto", fileDescriptor_f592e9299b63d68c) } + +var fileDescriptor_f592e9299b63d68c = []byte{ + // 1791 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xe7, 0xea, 0x0f, 0x29, 0x0e, 0x49, 0x89, 0x1a, 0x51, 0xd2, 0x98, 0xb6, 0xb9, 0x2c, 0x63, + 0xb7, 0x4c, 0x93, 0x28, 0x80, 0x93, 0xa2, 0x69, 0x5a, 0x14, 0xc8, 0xda, 0xa9, 0xe3, 0x5a, 0x2a, + 0x84, 0x91, 0xed, 0xa2, 0x01, 0x8a, 0xc5, 0x6a, 0x77, 0xb4, 0x5a, 0x78, 0x77, 0x67, 0xb1, 0x33, + 0x2b, 0x87, 0xb7, 0xf6, 0xd0, 0x7b, 0xfb, 0x2d, 0xfa, 0x01, 0x0a, 0xf4, 0xd4, 0x7b, 0x8e, 0x3e, + 0x06, 0x3d, 0x10, 0xb1, 0x7c, 0x29, 0x78, 0xca, 0x47, 0x28, 0x66, 0xf6, 0x3f, 0x49, 0x53, 0x74, + 0x62, 0x23, 0x27, 0xce, 0xfc, 0xde, 0x9f, 0x7d, 0xf3, 0xe6, 0xed, 0x7b, 0xbf, 0x25, 0xd8, 0xb6, + 0x08, 0x73, 0x42, 0x62, 0xe9, 0x6e, 0x18, 0x1c, 0x04, 0x21, 0xe5, 0x14, 0x56, 0x3d, 0x6a, 0x11, + 0x97, 0x75, 0x3f, 0xb0, 0x1d, 0x7e, 0x1e, 0x9d, 0x1e, 0x98, 0xd4, 0xfb, 0xd0, 0xa6, 0x36, 0xfd, + 0x50, 0x8a, 0x4f, 0xa3, 0x33, 0xb9, 0x93, 0x1b, 0xb9, 0x8a, 0xcd, 0xba, 0x2d, 0xc3, 0xe4, 0x0e, + 0xf5, 0x59, 0xb2, 0xdd, 0x37, 0x0d, 0xf3, 0x9c, 0x58, 0xba, 0x45, 0x02, 0xe2, 0x5b, 0xc4, 0x37, + 0x47, 0x89, 0xe0, 0x86, 0x49, 0x42, 0xee, 0x9c, 0x39, 0xa6, 0xc1, 0x89, 0x1e, 0x84, 0x34, 0x10, + 0x5b, 0x92, 0x9a, 0x5d, 0x27, 0xfe, 0x85, 0x13, 0x52, 0xdf, 0x23, 0x3e, 0xd7, 0x2f, 0x8c, 0xd0, + 0x31, 0x4e, 0xdd, 0x4c, 0xb8, 0xe7, 0x51, 0x2b, 0xb6, 0x74, 0xa8, 0xaf, 0x73, 0xc3, 0x4e, 0x1f, + 0xed, 0x13, 0xfe, 0x8c, 0x86, 0x4f, 0x93, 0x6d, 0x87, 0x11, 0x33, 0x0a, 0x1d, 0x3e, 0xd2, 0xed, + 0x90, 0x46, 0xc9, 0xb1, 0xba, 0xf0, 0x82, 0xba, 0x91, 0x47, 0x74, 0x8f, 0x46, 0x3e, 0x4f, 0x1d, + 0x9a, 0xe7, 0xc4, 0x7c, 0xaa, 0x5b, 0xe4, 0xcc, 0xf1, 0x1d, 0xe1, 0x34, 0xc1, 0xb7, 0x1d, 0xcf, + 0xb0, 0x89, 0xee, 0x1a, 0x23, 0x12, 0xa6, 0x90, 0x47, 0x78, 0xe8, 0x98, 0xe2, 0xa9, 0x69, 0x38, + 0x2d, 0xe6, 0x58, 0xc4, 0x34, 0x52, 0x8d, 0x8e, 0x4b, 0x6d, 0x3d, 0x14, 0xa7, 0x72, 0x1d, 0xcf, + 0x49, 0x1e, 0x31, 0xf8, 0xf7, 0x1a, 0x40, 0xf7, 0xe2, 0x1c, 0x1f, 0xe2, 0xe3, 0x13, 0x91, 0x93, + 0xc8, 0x75, 0x7c, 0xfb, 0x81, 0x7f, 0x46, 0xe1, 0x43, 0xb0, 0x55, 0xc8, 0xbf, 0xfe, 0x94, 0x8c, + 0x90, 0xd2, 0x57, 0x86, 0x8d, 0x3b, 0xbb, 0x07, 0xf1, 0x25, 0x1c, 0xe4, 0xa6, 0x0f, 0xc9, 0x48, + 0x6b, 0x7e, 0x3d, 0x56, 0x2b, 0xcf, 0xc7, 0xaa, 0x32, 0x19, 0xab, 0x15, 0xdc, 0x4a, 0x6c, 0x0f, + 0xc3, 0xe0, 0x21, 0x19, 0xc1, 0x03, 0x00, 0x0c, 0xdf, 0xa7, 0x5c, 0x66, 0x07, 0xad, 0xf4, 0x95, + 0x61, 0x5d, 0xdb, 0x9c, 0x8c, 0xd5, 0x02, 0x8a, 0x0b, 0x6b, 0xf8, 0x1e, 0xa8, 0x3b, 0x3e, 0xe3, + 0x86, 0x6f, 0x12, 0x86, 0x56, 0xfb, 0xca, 0x70, 0x5d, 0x6b, 0x4d, 0xc6, 0x6a, 0x0e, 0xe2, 0x7c, + 0x09, 0xbf, 0x04, 0x9d, 0x62, 0xa4, 0x21, 0x61, 0x34, 0x0a, 0x4d, 0x82, 0xd6, 0x64, 0xb8, 0xdd, + 0xd9, 0x70, 0x71, 0xa2, 0x31, 0x15, 0x33, 0xcc, 0x63, 0x4e, 0x35, 0xe0, 0xaf, 0x41, 
0x35, 0xa4, + 0x11, 0x27, 0x0c, 0xad, 0x4b, 0x6f, 0x3b, 0xa9, 0xb7, 0x63, 0x91, 0x41, 0x2c, 0x45, 0xda, 0xa6, + 0x70, 0xf3, 0xdf, 0xb1, 0x5a, 0x8d, 0xf7, 0x38, 0x31, 0x81, 0xc7, 0xa0, 0x3d, 0x5d, 0x15, 0xa8, + 0x2a, 0xdd, 0xec, 0xa7, 0x6e, 0x8e, 0x0a, 0xf2, 0x47, 0x86, 0x3d, 0x15, 0xd1, 0x96, 0x57, 0x16, + 0x43, 0x0d, 0xb4, 0x93, 0x52, 0x09, 0x5c, 0xc3, 0x24, 0xa2, 0x12, 0x51, 0xad, 0xec, 0xf1, 0x89, + 0x94, 0x1f, 0xa7, 0x62, 0xbc, 0x75, 0x51, 0x06, 0xa0, 0x06, 0x5a, 0xd9, 0xe6, 0x91, 0x61, 0x33, + 0xb4, 0xd1, 0x5f, 0x1d, 0xd6, 0xb5, 0x1b, 0x93, 0xb1, 0x8a, 0x32, 0xaf, 0xb2, 0x96, 0xde, 0xa7, + 0x9e, 0xc3, 0x89, 0x17, 0xf0, 0x11, 0x2e, 0x9b, 0x0c, 0xfe, 0xd6, 0x02, 0xdb, 0x85, 0x7c, 0x46, + 0xfe, 0x9b, 0x2f, 0x99, 0x3f, 0x83, 0xdd, 0xb9, 0xef, 0x1b, 0x5a, 0xe9, 0xaf, 0x0e, 0x1b, 0x77, + 0xae, 0xa7, 0x2e, 0x3f, 0xcf, 0x95, 0x9e, 0x24, 0x3a, 0x5a, 0x43, 0x38, 0x9e, 0x8c, 0xd5, 0x55, + 0xe2, 0x5f, 0xe0, 0x0e, 0x99, 0xd5, 0x60, 0xf0, 0x16, 0x58, 0x67, 0x84, 0x47, 0x81, 0xac, 0xae, + 0xc6, 0x9d, 0xcd, 0xd4, 0xdd, 0x67, 0xb2, 0x53, 0xe0, 0x58, 0x08, 0x7f, 0x0a, 0xaa, 0x71, 0xeb, + 0x48, 0x8a, 0x69, 0x5a, 0x2d, 0x91, 0xc2, 0x21, 0xa8, 0x79, 0xd4, 0x77, 0x38, 0x0d, 0x93, 0x3a, + 0x99, 0x56, 0x4c, 0xc5, 0xf0, 0x4b, 0xd0, 0xb5, 0x48, 0x10, 0x12, 0xd1, 0x62, 0x2c, 0x9d, 0x71, + 0x23, 0xe4, 0x3a, 0x77, 0x3c, 0x42, 0x23, 0xae, 0x33, 0x59, 0x1d, 0x2d, 0xed, 0xe6, 0x64, 0xac, + 0xee, 0x97, 0x44, 0xf9, 0x4d, 0x20, 0x05, 0xef, 0xe7, 0x0e, 0x4e, 0x84, 0xd2, 0xa3, 0x58, 0xe7, + 0x44, 0xbc, 0x65, 0x41, 0xe8, 0x5c, 0x38, 0x2e, 0xb1, 0x89, 0x25, 0xeb, 0x62, 0x23, 0x7e, 0xcb, + 0x72, 0x14, 0x17, 0xd6, 0xf0, 0x03, 0x00, 0xcc, 0x20, 0xd2, 0x9f, 0x11, 0xc7, 0x3e, 0xe7, 0x68, + 0x43, 0x3e, 0x5b, 0xea, 0xe7, 0x28, 0xae, 0x9b, 0x41, 0xf4, 0x47, 0xb9, 0x84, 0x08, 0xac, 0x07, + 0x34, 0xe4, 0x0c, 0xd5, 0xfb, 0xab, 0xc3, 0x96, 0xb6, 0xd2, 0xae, 0xe0, 0x18, 0x80, 0x1a, 0x68, + 0x12, 0x3b, 0x24, 0x8c, 0xe9, 0x61, 0x24, 0xae, 0x08, 0xc8, 0x2b, 0xba, 0x96, 0xe6, 0xe0, 0x24, + 0xe9, 0x79, 0xf7, 0x45, 0xcb, 0xc3, 0x91, 0x4b, 0xb4, 0x35, 0x71, 0x41, 0xb8, 0x11, 0x1b, 0x09, + 0x84, 0x89, 0x60, 0x44, 0x93, 0x4a, 0xde, 0xdd, 0x46, 0xde, 0x22, 0x72, 0x14, 0xd7, 0x5d, 0x6a, + 0x9f, 0xc4, 0x2f, 0xe6, 0x2f, 0x40, 0x33, 0xee, 0x7a, 0x4c, 0xb7, 0x23, 0xc7, 0x42, 0x4d, 0x69, + 0x00, 0x27, 0x63, 0xb5, 0x8c, 0x2b, 0xb8, 0x91, 0xec, 0xef, 0x47, 0x4e, 0x7c, 0xe4, 0x90, 0xc8, + 0xdc, 0x1b, 0x1c, 0xb5, 0xfa, 0xca, 0x70, 0x35, 0x39, 0x72, 0x86, 0xe2, 0x7a, 0xb2, 0xfe, 0x8c, + 0xc3, 0x07, 0x60, 0x67, 0x7a, 0x56, 0x38, 0x84, 0xa1, 0x4d, 0x79, 0x3e, 0x94, 0x9e, 0xef, 0xae, + 0x54, 0xb9, 0x97, 0x4d, 0x13, 0x0c, 0xcd, 0x32, 0xe2, 0x10, 0x06, 0x3f, 0x06, 0x1d, 0x97, 0xd8, + 0x86, 0x39, 0xd2, 0x2d, 0xfa, 0xcc, 0x77, 0xa9, 0x61, 0xe9, 0x11, 0x23, 0x21, 0xda, 0x92, 0x81, + 0xaf, 0x20, 0x05, 0xc3, 0x58, 0x7e, 0x2f, 0x11, 0x3f, 0x66, 0x24, 0x84, 0xf7, 0x41, 0x9f, 0x87, + 0x11, 0x93, 0xb5, 0x32, 0x62, 0x9c, 0x78, 0x7a, 0x61, 0x44, 0x31, 0x3d, 0x30, 0xf8, 0x39, 0x6a, + 0x0b, 0x0f, 0xf8, 0x66, 0xa2, 0x77, 0x22, 0xd5, 0xee, 0x16, 0xb4, 0x8e, 0x0d, 0x7e, 0x0e, 0x3f, + 0x01, 0xad, 0xe2, 0x90, 0x61, 0x68, 0x5b, 0x9e, 0x61, 0xa7, 0xdc, 0x36, 0x8e, 0x84, 0x0c, 0x37, + 0x2f, 0xf2, 0x0d, 0x83, 0xef, 0x82, 0x5a, 0x32, 0xc3, 0x10, 0x94, 0xb5, 0xbd, 0x95, 0xda, 0xfc, + 0x21, 0x86, 0x71, 0x2a, 0x87, 0xbf, 0x05, 0xed, 0x72, 0x45, 0x7b, 0x0c, 0xed, 0xc8, 0x1c, 0x77, + 0x26, 0x63, 0x75, 0x46, 0x86, 0x37, 0x59, 0xa1, 0x7e, 0x8f, 0x44, 0x27, 0xdf, 0x9b, 0x3f, 0x81, + 0x51, 0x47, 0x3e, 0xf9, 0x66, 0x96, 0xf1, 0x5c, 0xeb, 0x38, 0x53, 0x92, 0x55, 0xa5, 0xe0, 0x5d, + 0x73, 0x9e, 
0x10, 0xde, 0x06, 0x9b, 0xf1, 0xe4, 0x14, 0x59, 0xf7, 0x0d, 0x8f, 0xa0, 0x5d, 0x99, + 0xb7, 0x96, 0x44, 0x1f, 0x27, 0x60, 0xae, 0x16, 0x18, 0x8c, 0x3d, 0xa3, 0xa1, 0x85, 0xf6, 0x0a, + 0x6a, 0xc7, 0x09, 0x28, 0x1a, 0xf1, 0xf4, 0x7c, 0x46, 0xfb, 0xe5, 0x46, 0x7c, 0x57, 0xc8, 0xef, + 0x65, 0x62, 0xbc, 0x65, 0x96, 0x01, 0x51, 0xc2, 0x85, 0x59, 0xce, 0x10, 0x92, 0x37, 0x02, 0x53, + 0xfb, 0x07, 0x42, 0x76, 0x28, 0x44, 0xb8, 0xe1, 0x64, 0x6b, 0x06, 0x7f, 0x0f, 0x1a, 0x85, 0x79, + 0x8f, 0xae, 0x49, 0xab, 0x77, 0xe7, 0x4c, 0xb9, 0xb8, 0x2b, 0x1f, 0x1c, 0x49, 0x65, 0xd1, 0xb6, + 0x3f, 0xf7, 0x79, 0x38, 0xc2, 0xc0, 0xcb, 0x00, 0xf8, 0x1e, 0xd8, 0x48, 0x88, 0x02, 0x43, 0x5d, + 0xe9, 0x28, 0xbb, 0xdc, 0x93, 0x18, 0xc7, 0x99, 0x02, 0xfc, 0x14, 0x6c, 0x96, 0x69, 0x04, 0xba, + 0x2e, 0x4f, 0xdc, 0x49, 0x4d, 0x0e, 0xa9, 0x8d, 0x0d, 0x4e, 0x0e, 0x85, 0x0c, 0x37, 0xdd, 0xc2, + 0xae, 0xfb, 0x18, 0x6c, 0x4d, 0xc5, 0x01, 0xdb, 0x60, 0x35, 0x9d, 0x10, 0x75, 0x2c, 0x96, 0xf0, + 0x7d, 0xb0, 0x7e, 0x61, 0xb8, 0x11, 0x91, 0x04, 0xa1, 0x71, 0x67, 0x2f, 0x1b, 0x92, 0xa9, 0xe5, + 0x13, 0x21, 0xc5, 0xb1, 0xd2, 0xa7, 0x2b, 0x9f, 0x28, 0x83, 0xbf, 0x2a, 0xa0, 0x51, 0x98, 0xc4, + 0xf0, 0x97, 0xd9, 0xb8, 0x56, 0xe4, 0x69, 0xd4, 0x39, 0xe3, 0xfa, 0x20, 0xfe, 0x89, 0x93, 0x91, + 0xa8, 0x77, 0x7f, 0x05, 0x1a, 0x05, 0x78, 0x4e, 0x6c, 0x9d, 0x62, 0x6c, 0xcd, 0x62, 0x0c, 0xdf, + 0xae, 0x80, 0x76, 0x9e, 0xf5, 0xc7, 0x81, 0x65, 0x70, 0x02, 0x7b, 0x45, 0x02, 0x23, 0xdc, 0xac, + 0x7f, 0x51, 0x29, 0x72, 0x96, 0x9c, 0x57, 0xac, 0x2c, 0xe6, 0x15, 0xca, 0x1c, 0x5e, 0xd1, 0x2f, + 0xb1, 0x29, 0x31, 0xc0, 0xea, 0x5f, 0x28, 0x25, 0xfe, 0xf4, 0xa0, 0x5c, 0x23, 0x6b, 0x32, 0x19, + 0xc3, 0xd9, 0x1a, 0x89, 0xa3, 0x5d, 0x54, 0x22, 0x6f, 0xe9, 0xe6, 0xb4, 0x0e, 0x80, 0x34, 0x10, + 0xb1, 0x1a, 0xae, 0x9e, 0xa5, 0x45, 0xdb, 0x05, 0x3b, 0x19, 0x9a, 0x1f, 0x67, 0xf0, 0x0f, 0x05, + 0xb4, 0x4a, 0xd4, 0x01, 0x7e, 0x04, 0x9a, 0x41, 0x48, 0x4d, 0x31, 0x72, 0xe2, 0x36, 0x2f, 0xbb, + 0x68, 0x5b, 0xb4, 0xff, 0x22, 0x8e, 0x1b, 0xc9, 0x4e, 0x36, 0xff, 0x01, 0xa8, 0x5a, 0xd4, 0x33, + 0x9c, 0x94, 0x81, 0x82, 0xc9, 0x58, 0x4d, 0x10, 0x9c, 0xfc, 0xc2, 0x9f, 0x81, 0x0d, 0x51, 0xe4, + 0xd2, 0xa9, 0xcc, 0xac, 0xd6, 0x9c, 0x8c, 0xd5, 0x0c, 0xc3, 0x35, 0x97, 0xda, 0xc2, 0xd9, 0xe0, + 0x5f, 0x0a, 0x80, 0xb3, 0x94, 0x12, 0xfe, 0x1c, 0xd4, 0x3d, 0xe2, 0xd1, 0x70, 0xa4, 0x7b, 0xa7, + 0xf1, 0xc5, 0xc7, 0xcc, 0x35, 0x03, 0xf1, 0x46, 0xbc, 0x3c, 0x3a, 0x85, 0xb7, 0x40, 0xcd, 0x72, + 0xd8, 0x53, 0xa1, 0xb9, 0x22, 0x35, 0x1b, 0x93, 0xb1, 0x9a, 0x42, 0xb8, 0x2a, 0x16, 0x47, 0xa7, + 0xf0, 0x1d, 0x50, 0x0b, 0x29, 0xe5, 0xfa, 0x19, 0x4b, 0x02, 0x92, 0x61, 0x0b, 0xe8, 0x4c, 0x96, + 0x04, 0xe5, 0xbf, 0x63, 0x22, 0x6c, 0xcf, 0xf8, 0x4a, 0x0f, 0x1c, 0x8b, 0x49, 0xaa, 0xb2, 0x1e, + 0x87, 0x9d, 0x62, 0xb8, 0xe6, 0x19, 0x5f, 0x1d, 0x3b, 0x16, 0x1b, 0xfc, 0xa7, 0x0d, 0x40, 0x1e, + 0xf6, 0xdb, 0xcb, 0xe3, 0x52, 0x51, 0x97, 0x68, 0xfe, 0xda, 0x15, 0x34, 0xff, 0x4f, 0xaf, 0x22, + 0x84, 0xeb, 0x57, 0x13, 0xc2, 0xda, 0x92, 0x64, 0xb0, 0xba, 0x1c, 0x19, 0xac, 0x2d, 0x24, 0x83, + 0xf3, 0xa6, 0xe0, 0xf5, 0xd7, 0x98, 0x82, 0xa7, 0x0b, 0x29, 0x62, 0x4c, 0xd3, 0x6e, 0x4f, 0xc6, + 0xaa, 0x5a, 0xd0, 0x4a, 0xe5, 0x3e, 0x5b, 0x8e, 0x2a, 0x16, 0x08, 0x6b, 0x7d, 0x31, 0x61, 0x2d, + 0x14, 0x29, 0x78, 0x75, 0x91, 0x96, 0xca, 0xbe, 0xb1, 0xb8, 0xec, 0xcb, 0xb4, 0xb3, 0x79, 0x15, + 0xed, 0x2c, 0xb3, 0xda, 0xd6, 0x95, 0xac, 0x36, 0xa3, 0xa9, 0x9b, 0xd3, 0x34, 0x35, 0x6f, 0xba, + 0x5b, 0xaf, 0xdf, 0x74, 0xcb, 0xfc, 0xb4, 0x7d, 0x15, 0x3f, 0x2d, 0xf6, 0x91, 0xed, 0x05, 0x7d, + 0x64, 0x86, 0xc8, 0xc2, 0xe5, 0x88, 
0x6c, 0xf9, 0x8b, 0x7a, 0xe7, 0xca, 0x2f, 0xea, 0xdf, 0x4c, + 0x51, 0xf4, 0xce, 0x15, 0x14, 0xbd, 0x4c, 0xce, 0xb5, 0x39, 0x5f, 0xb2, 0xbb, 0x0b, 0xbf, 0x64, + 0x67, 0xbf, 0x5d, 0x5f, 0xc1, 0xa5, 0xf7, 0xde, 0x20, 0x97, 0xde, 0xff, 0xc1, 0x5c, 0x1a, 0x7d, + 0x2f, 0x2e, 0x7d, 0xed, 0x7b, 0x70, 0xe9, 0xee, 0x15, 0x5c, 0x7a, 0xe6, 0x33, 0xfd, 0xc6, 0x6b, + 0x7f, 0xa6, 0x97, 0xa6, 0xc2, 0xcd, 0x05, 0x53, 0x61, 0x01, 0xf1, 0xee, 0xbd, 0x05, 0xe2, 0xad, + 0x2e, 0x47, 0xbc, 0xfb, 0xcb, 0x12, 0xef, 0x9f, 0xfc, 0x40, 0xe2, 0x3d, 0x58, 0x8e, 0x78, 0xdf, + 0x2d, 0x93, 0xaa, 0x77, 0xa4, 0xd5, 0x60, 0x96, 0x54, 0x2d, 0xcd, 0xb8, 0x6f, 0xbd, 0x3e, 0xe3, + 0xbe, 0xfd, 0x23, 0x33, 0x6e, 0xed, 0xe3, 0xe7, 0x2f, 0x7a, 0x95, 0x6f, 0x5e, 0xf4, 0x2a, 0xdf, + 0xbd, 0xe8, 0x29, 0x7f, 0xb9, 0xec, 0x29, 0xff, 0xbc, 0xec, 0x29, 0x5f, 0x5f, 0xf6, 0x94, 0xe7, + 0x97, 0x3d, 0xe5, 0xdb, 0xcb, 0x9e, 0xf2, 0xbf, 0xcb, 0x5e, 0xe5, 0xbb, 0xcb, 0x9e, 0xf2, 0xf7, + 0x97, 0xbd, 0xca, 0xf3, 0x97, 0xbd, 0xca, 0x37, 0x2f, 0x7b, 0x95, 0xd3, 0xaa, 0xfc, 0xc3, 0xf1, + 0xa3, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x93, 0xa4, 0x56, 0xea, 0xd3, 0x15, 0x00, 0x00, +} + +func (this *DesiredLRPSchedulingInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPSchedulingInfo) + if !ok { + that2, ok := that.(DesiredLRPSchedulingInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLRPKey.Equal(&that1.DesiredLRPKey) { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if this.Instances != that1.Instances { + return false + } + if !this.DesiredLRPResource.Equal(&that1.DesiredLRPResource) { + return false + } + if !this.Routes.Equal(that1.Routes) { + return false + } + if !this.ModificationTag.Equal(&that1.ModificationTag) { + return false + } + if !this.VolumePlacement.Equal(that1.VolumePlacement) { + return false + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + return true +} +func (this *DesiredLRPRunInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPRunInfo) + if !ok { + that2, ok := that.(DesiredLRPRunInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLRPKey.Equal(&that1.DesiredLRPKey) { + return false + } + if len(this.EnvironmentVariables) != len(that1.EnvironmentVariables) { + return false + } + for i := range this.EnvironmentVariables { + if !this.EnvironmentVariables[i].Equal(&that1.EnvironmentVariables[i]) { + return false + } + } + if !this.Setup.Equal(that1.Setup) { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if !this.Monitor.Equal(that1.Monitor) { + return false + } + if this.DeprecatedStartTimeoutS != that1.DeprecatedStartTimeoutS { + return false + } + if this.Privileged != that1.Privileged { + return false + } + if this.CpuWeight != that1.CpuWeight { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if this.Ports[i] != that1.Ports[i] { + return false + } + } + if len(this.EgressRules) != len(that1.EgressRules) { + return false + } + for i := range this.EgressRules { + if 
!this.EgressRules[i].Equal(&that1.EgressRules[i]) { + return false + } + } + if this.LogSource != that1.LogSource { + return false + } + if this.MetricsGuid != that1.MetricsGuid { + return false + } + if this.CreatedAt != that1.CreatedAt { + return false + } + if len(this.CachedDependencies) != len(that1.CachedDependencies) { + return false + } + for i := range this.CachedDependencies { + if !this.CachedDependencies[i].Equal(that1.CachedDependencies[i]) { + return false + } + } + if this.LegacyDownloadUser != that1.LegacyDownloadUser { + return false + } + if this.TrustedSystemCertificatesPath != that1.TrustedSystemCertificatesPath { + return false + } + if len(this.VolumeMounts) != len(that1.VolumeMounts) { + return false + } + for i := range this.VolumeMounts { + if !this.VolumeMounts[i].Equal(that1.VolumeMounts[i]) { + return false + } + } + if !this.Network.Equal(that1.Network) { + return false + } + if this.StartTimeoutMs != that1.StartTimeoutMs { + return false + } + if !this.CertificateProperties.Equal(that1.CertificateProperties) { + return false + } + if this.ImageUsername != that1.ImageUsername { + return false + } + if this.ImagePassword != that1.ImagePassword { + return false + } + if !this.CheckDefinition.Equal(that1.CheckDefinition) { + return false + } + if len(this.ImageLayers) != len(that1.ImageLayers) { + return false + } + for i := range this.ImageLayers { + if !this.ImageLayers[i].Equal(that1.ImageLayers[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + if len(this.Sidecars) != len(that1.Sidecars) { + return false + } + for i := range this.Sidecars { + if !this.Sidecars[i].Equal(that1.Sidecars[i]) { + return false + } + } + if !this.LogRateLimit.Equal(that1.LogRateLimit) { + return false + } + return true +} +func (this *ProtoRoutes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProtoRoutes) + if !ok { + that2, ok := that.(ProtoRoutes) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Routes) != len(that1.Routes) { + return false + } + for i := range this.Routes { + if !bytes.Equal(this.Routes[i], that1.Routes[i]) { + return false + } + } + return true +} +func (this *DesiredLRPUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPUpdate) + if !ok { + that2, ok := that.(DesiredLRPUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.OptionalInstances == nil { + if this.OptionalInstances != nil { + return false + } + } else if this.OptionalInstances == nil { + return false + } else if !this.OptionalInstances.Equal(that1.OptionalInstances) { + return false + } + if that1.Routes == nil { + if this.Routes != nil { + return false + } + } else if !this.Routes.Equal(*that1.Routes) { + return false + } + if that1.OptionalAnnotation == nil { + if this.OptionalAnnotation != nil { + return false + } + } else if this.OptionalAnnotation == nil { + return false + } else if !this.OptionalAnnotation.Equal(that1.OptionalAnnotation) { + return false + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if 
!this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + return true +} +func (this *DesiredLRPUpdate_Instances) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPUpdate_Instances) + if !ok { + that2, ok := that.(DesiredLRPUpdate_Instances) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Instances != that1.Instances { + return false + } + return true +} +func (this *DesiredLRPUpdate_Annotation) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPUpdate_Annotation) + if !ok { + that2, ok := that.(DesiredLRPUpdate_Annotation) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Annotation != that1.Annotation { + return false + } + return true +} +func (this *DesiredLRPKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPKey) + if !ok { + that2, ok := that.(DesiredLRPKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.LogGuid != that1.LogGuid { + return false + } + return true +} +func (this *DesiredLRPResource) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPResource) + if !ok { + that2, ok := that.(DesiredLRPResource) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.RootFs != that1.RootFs { + return false + } + if this.MaxPids != that1.MaxPids { + return false + } + return true +} +func (this *DesiredLRP) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRP) + if !ok { + that2, ok := that.(DesiredLRP) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.RootFs != that1.RootFs { + return false + } + if this.Instances != that1.Instances { + return false + } + if len(this.EnvironmentVariables) != len(that1.EnvironmentVariables) { + return false + } + for i := range this.EnvironmentVariables { + if !this.EnvironmentVariables[i].Equal(that1.EnvironmentVariables[i]) { + return false + } + } + if !this.Setup.Equal(that1.Setup) { + return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.StartTimeoutMs != that1.StartTimeoutMs { + return false + } + if this.DeprecatedStartTimeoutS != that1.DeprecatedStartTimeoutS { + return false + } + if !this.Monitor.Equal(that1.Monitor) { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.CpuWeight != that1.CpuWeight { + return false + } + if this.Privileged != that1.Privileged { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := 
range this.Ports { + if this.Ports[i] != that1.Ports[i] { + return false + } + } + if that1.Routes == nil { + if this.Routes != nil { + return false + } + } else if !this.Routes.Equal(*that1.Routes) { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.LogGuid != that1.LogGuid { + return false + } + if this.MetricsGuid != that1.MetricsGuid { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if len(this.EgressRules) != len(that1.EgressRules) { + return false + } + for i := range this.EgressRules { + if !this.EgressRules[i].Equal(that1.EgressRules[i]) { + return false + } + } + if !this.ModificationTag.Equal(that1.ModificationTag) { + return false + } + if len(this.CachedDependencies) != len(that1.CachedDependencies) { + return false + } + for i := range this.CachedDependencies { + if !this.CachedDependencies[i].Equal(that1.CachedDependencies[i]) { + return false + } + } + if this.LegacyDownloadUser != that1.LegacyDownloadUser { + return false + } + if this.TrustedSystemCertificatesPath != that1.TrustedSystemCertificatesPath { + return false + } + if len(this.VolumeMounts) != len(that1.VolumeMounts) { + return false + } + for i := range this.VolumeMounts { + if !this.VolumeMounts[i].Equal(that1.VolumeMounts[i]) { + return false + } + } + if !this.Network.Equal(that1.Network) { + return false + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + if this.MaxPids != that1.MaxPids { + return false + } + if !this.CertificateProperties.Equal(that1.CertificateProperties) { + return false + } + if this.ImageUsername != that1.ImageUsername { + return false + } + if this.ImagePassword != that1.ImagePassword { + return false + } + if !this.CheckDefinition.Equal(that1.CheckDefinition) { + return false + } + if len(this.ImageLayers) != len(that1.ImageLayers) { + return false + } + for i := range this.ImageLayers { + if !this.ImageLayers[i].Equal(that1.ImageLayers[i]) { + return false + } + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + if len(this.Sidecars) != len(that1.Sidecars) { + return false + } + for i := range this.Sidecars { + if !this.Sidecars[i].Equal(that1.Sidecars[i]) { + return false + } + } + if !this.LogRateLimit.Equal(that1.LogRateLimit) { + return false + } + return true +} +func (this *DesiredLRPSchedulingInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&models.DesiredLRPSchedulingInfo{") + s = append(s, "DesiredLRPKey: "+strings.Replace(this.DesiredLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + s = append(s, "Instances: "+fmt.Sprintf("%#v", this.Instances)+",\n") + s = append(s, "DesiredLRPResource: "+strings.Replace(this.DesiredLRPResource.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Routes: "+fmt.Sprintf("%#v", this.Routes)+",\n") + s = append(s, "ModificationTag: "+strings.Replace(this.ModificationTag.GoString(), `&`, ``, 1)+",\n") + if this.VolumePlacement != nil { + s = append(s, "VolumePlacement: "+fmt.Sprintf("%#v", this.VolumePlacement)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*DesiredLRPRunInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 31) + s = append(s, "&models.DesiredLRPRunInfo{") + s = append(s, "DesiredLRPKey: "+strings.Replace(this.DesiredLRPKey.GoString(), `&`, ``, 1)+",\n") + if this.EnvironmentVariables != nil { + vs := make([]EnvironmentVariable, len(this.EnvironmentVariables)) + for i := range vs { + vs[i] = this.EnvironmentVariables[i] + } + s = append(s, "EnvironmentVariables: "+fmt.Sprintf("%#v", vs)+",\n") + } + if this.Setup != nil { + s = append(s, "Setup: "+fmt.Sprintf("%#v", this.Setup)+",\n") + } + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + if this.Monitor != nil { + s = append(s, "Monitor: "+fmt.Sprintf("%#v", this.Monitor)+",\n") + } + s = append(s, "DeprecatedStartTimeoutS: "+fmt.Sprintf("%#v", this.DeprecatedStartTimeoutS)+",\n") + s = append(s, "Privileged: "+fmt.Sprintf("%#v", this.Privileged)+",\n") + s = append(s, "CpuWeight: "+fmt.Sprintf("%#v", this.CpuWeight)+",\n") + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + if this.EgressRules != nil { + vs := make([]SecurityGroupRule, len(this.EgressRules)) + for i := range vs { + vs[i] = this.EgressRules[i] + } + s = append(s, "EgressRules: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "MetricsGuid: "+fmt.Sprintf("%#v", this.MetricsGuid)+",\n") + s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n") + if this.CachedDependencies != nil { + s = append(s, "CachedDependencies: "+fmt.Sprintf("%#v", this.CachedDependencies)+",\n") + } + s = append(s, "LegacyDownloadUser: "+fmt.Sprintf("%#v", this.LegacyDownloadUser)+",\n") + s = append(s, "TrustedSystemCertificatesPath: "+fmt.Sprintf("%#v", this.TrustedSystemCertificatesPath)+",\n") + if this.VolumeMounts != nil { + s = append(s, "VolumeMounts: "+fmt.Sprintf("%#v", this.VolumeMounts)+",\n") + } + if this.Network != nil { + s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n") + } + s = append(s, "StartTimeoutMs: "+fmt.Sprintf("%#v", this.StartTimeoutMs)+",\n") + if this.CertificateProperties != nil { + s = append(s, "CertificateProperties: "+fmt.Sprintf("%#v", this.CertificateProperties)+",\n") + } + s = append(s, "ImageUsername: "+fmt.Sprintf("%#v", this.ImageUsername)+",\n") + s = append(s, "ImagePassword: "+fmt.Sprintf("%#v", this.ImagePassword)+",\n") + if this.CheckDefinition != nil { + s = append(s, "CheckDefinition: "+fmt.Sprintf("%#v", this.CheckDefinition)+",\n") + } + if this.ImageLayers != nil { + s = append(s, "ImageLayers: "+fmt.Sprintf("%#v", this.ImageLayers)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.Sidecars != nil { + s = append(s, "Sidecars: "+fmt.Sprintf("%#v", this.Sidecars)+",\n") + } + if this.LogRateLimit != nil { + s = append(s, "LogRateLimit: "+fmt.Sprintf("%#v", this.LogRateLimit)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProtoRoutes) GoString() string { + if this == nil { + 
return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ProtoRoutes{") + keysForRoutes := make([]string, 0, len(this.Routes)) + for k, _ := range this.Routes { + keysForRoutes = append(keysForRoutes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRoutes) + mapStringForRoutes := "map[string][]byte{" + for _, k := range keysForRoutes { + mapStringForRoutes += fmt.Sprintf("%#v: %#v,", k, this.Routes[k]) + } + mapStringForRoutes += "}" + if this.Routes != nil { + s = append(s, "Routes: "+mapStringForRoutes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPUpdate) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.DesiredLRPUpdate{") + if this.OptionalInstances != nil { + s = append(s, "OptionalInstances: "+fmt.Sprintf("%#v", this.OptionalInstances)+",\n") + } + s = append(s, "Routes: "+fmt.Sprintf("%#v", this.Routes)+",\n") + if this.OptionalAnnotation != nil { + s = append(s, "OptionalAnnotation: "+fmt.Sprintf("%#v", this.OptionalAnnotation)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPUpdate_Instances) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.DesiredLRPUpdate_Instances{` + + `Instances:` + fmt.Sprintf("%#v", this.Instances) + `}`}, ", ") + return s +} +func (this *DesiredLRPUpdate_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.DesiredLRPUpdate_Annotation{` + + `Annotation:` + fmt.Sprintf("%#v", this.Annotation) + `}`}, ", ") + return s +} +func (this *DesiredLRPKey) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.DesiredLRPKey{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "LogGuid: "+fmt.Sprintf("%#v", this.LogGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPResource) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.DesiredLRPResource{") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "RootFs: "+fmt.Sprintf("%#v", this.RootFs)+",\n") + s = append(s, "MaxPids: "+fmt.Sprintf("%#v", this.MaxPids)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRP) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 41) + s = append(s, "&models.DesiredLRP{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "RootFs: "+fmt.Sprintf("%#v", this.RootFs)+",\n") + s = append(s, "Instances: "+fmt.Sprintf("%#v", this.Instances)+",\n") + if this.EnvironmentVariables != nil { + s = 
append(s, "EnvironmentVariables: "+fmt.Sprintf("%#v", this.EnvironmentVariables)+",\n") + } + if this.Setup != nil { + s = append(s, "Setup: "+fmt.Sprintf("%#v", this.Setup)+",\n") + } + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "StartTimeoutMs: "+fmt.Sprintf("%#v", this.StartTimeoutMs)+",\n") + s = append(s, "DeprecatedStartTimeoutS: "+fmt.Sprintf("%#v", this.DeprecatedStartTimeoutS)+",\n") + if this.Monitor != nil { + s = append(s, "Monitor: "+fmt.Sprintf("%#v", this.Monitor)+",\n") + } + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "CpuWeight: "+fmt.Sprintf("%#v", this.CpuWeight)+",\n") + s = append(s, "Privileged: "+fmt.Sprintf("%#v", this.Privileged)+",\n") + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + s = append(s, "Routes: "+fmt.Sprintf("%#v", this.Routes)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "LogGuid: "+fmt.Sprintf("%#v", this.LogGuid)+",\n") + s = append(s, "MetricsGuid: "+fmt.Sprintf("%#v", this.MetricsGuid)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + if this.EgressRules != nil { + s = append(s, "EgressRules: "+fmt.Sprintf("%#v", this.EgressRules)+",\n") + } + if this.ModificationTag != nil { + s = append(s, "ModificationTag: "+fmt.Sprintf("%#v", this.ModificationTag)+",\n") + } + if this.CachedDependencies != nil { + s = append(s, "CachedDependencies: "+fmt.Sprintf("%#v", this.CachedDependencies)+",\n") + } + s = append(s, "LegacyDownloadUser: "+fmt.Sprintf("%#v", this.LegacyDownloadUser)+",\n") + s = append(s, "TrustedSystemCertificatesPath: "+fmt.Sprintf("%#v", this.TrustedSystemCertificatesPath)+",\n") + if this.VolumeMounts != nil { + s = append(s, "VolumeMounts: "+fmt.Sprintf("%#v", this.VolumeMounts)+",\n") + } + if this.Network != nil { + s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "MaxPids: "+fmt.Sprintf("%#v", this.MaxPids)+",\n") + if this.CertificateProperties != nil { + s = append(s, "CertificateProperties: "+fmt.Sprintf("%#v", this.CertificateProperties)+",\n") + } + s = append(s, "ImageUsername: "+fmt.Sprintf("%#v", this.ImageUsername)+",\n") + s = append(s, "ImagePassword: "+fmt.Sprintf("%#v", this.ImagePassword)+",\n") + if this.CheckDefinition != nil { + s = append(s, "CheckDefinition: "+fmt.Sprintf("%#v", this.CheckDefinition)+",\n") + } + if this.ImageLayers != nil { + s = append(s, "ImageLayers: "+fmt.Sprintf("%#v", this.ImageLayers)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.Sidecars != nil { + s = append(s, "Sidecars: "+fmt.Sprintf("%#v", this.Sidecars)+",\n") + } + if this.LogRateLimit != nil { + s = append(s, "LogRateLimit: "+fmt.Sprintf("%#v", this.LogRateLimit)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDesiredLrp(v 
interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DesiredLRPSchedulingInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPSchedulingInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPSchedulingInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.VolumePlacement != nil { + { + size, err := m.VolumePlacement.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size := m.Routes.Size() + i -= size + if _, err := m.Routes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.DesiredLRPResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Instances != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Instances)) + i-- + dAtA[i] = 0x18 + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.DesiredLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DesiredLRPRunInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPRunInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPRunInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LogRateLimit != nil { + { + size, err := m.LogRateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.Sidecars) > 0 { + for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := 
v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.ImageLayers) > 0 { + for iNdEx := len(m.ImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.CheckDefinition != nil { + { + size, err := m.CheckDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.ImagePassword) > 0 { + i -= len(m.ImagePassword) + copy(dAtA[i:], m.ImagePassword) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImagePassword))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.ImageUsername) > 0 { + i -= len(m.ImageUsername) + copy(dAtA[i:], m.ImageUsername) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImageUsername))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CertificateProperties != nil { + { + size, err := m.CertificateProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.StartTimeoutMs != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.StartTimeoutMs)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if len(m.TrustedSystemCertificatesPath) > 0 { + i -= len(m.TrustedSystemCertificatesPath) + copy(dAtA[i:], m.TrustedSystemCertificatesPath) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.TrustedSystemCertificatesPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.LegacyDownloadUser) > 0 { + i -= len(m.LegacyDownloadUser) + copy(dAtA[i:], m.LegacyDownloadUser) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LegacyDownloadUser))) + i-- + dAtA[i] = 0x7a + } + if len(m.CachedDependencies) > 0 { + for iNdEx := len(m.CachedDependencies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CachedDependencies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if m.CreatedAt != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x68 + } + if len(m.MetricsGuid) > 0 { + i -= len(m.MetricsGuid) + copy(dAtA[i:], m.MetricsGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.MetricsGuid))) + i-- + dAtA[i] = 0x62 + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = 
encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x5a + } + if len(m.EgressRules) > 0 { + for iNdEx := len(m.EgressRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EgressRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x48 + } + } + if m.CpuWeight != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.CpuWeight)) + i-- + dAtA[i] = 0x40 + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.DeprecatedStartTimeoutS != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DeprecatedStartTimeoutS)) + i-- + dAtA[i] = 0x30 + } + if m.Monitor != nil { + { + size, err := m.Monitor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Setup != nil { + { + size, err := m.Setup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.EnvironmentVariables) > 0 { + for iNdEx := len(m.EnvironmentVariables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvironmentVariables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.DesiredLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProtoRoutes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtoRoutes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProtoRoutes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Routes) > 0 { + for k := range m.Routes { + v := m.Routes[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MetricTags) > 0 { + for 
k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.OptionalAnnotation != nil { + { + size := m.OptionalAnnotation.Size() + i -= size + if _, err := m.OptionalAnnotation.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Routes != nil { + { + size := m.Routes.Size() + i -= size + if _, err := m.Routes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.OptionalInstances != nil { + { + size := m.OptionalInstances.Size() + i -= size + if _, err := m.OptionalInstances.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPUpdate_Instances) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPUpdate_Instances) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Instances)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *DesiredLRPUpdate_Annotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPUpdate_Annotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *DesiredLRPKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LogGuid) > 0 { + i -= len(m.LogGuid) + copy(dAtA[i:], m.LogGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogGuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x12 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxPids != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MaxPids)) + i-- + dAtA[i] = 0x20 + } + if len(m.RootFs) > 0 { + i -= len(m.RootFs) + copy(dAtA[i:], 
m.RootFs) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.RootFs))) + i-- + dAtA[i] = 0x1a + } + if m.DiskMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x10 + } + if m.MemoryMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LogRateLimit != nil { + { + size, err := m.LogRateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if len(m.Sidecars) > 0 { + for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDesiredLrp(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + } + if len(m.ImageLayers) > 0 { + for iNdEx := len(m.ImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if m.CheckDefinition != nil { + { + size, err := m.CheckDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.ImagePassword) > 0 { + i -= len(m.ImagePassword) + copy(dAtA[i:], m.ImagePassword) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImagePassword))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if len(m.ImageUsername) > 0 { + i -= len(m.ImageUsername) + copy(dAtA[i:], m.ImageUsername) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ImageUsername))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.CertificateProperties != nil { + { + size, err := m.CertificateProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.MaxPids != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MaxPids)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + 
dAtA[i] = 0xe2 + } + } + if m.StartTimeoutMs != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.StartTimeoutMs)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.TrustedSystemCertificatesPath) > 0 { + i -= len(m.TrustedSystemCertificatesPath) + copy(dAtA[i:], m.TrustedSystemCertificatesPath) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.TrustedSystemCertificatesPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.LegacyDownloadUser) > 0 { + i -= len(m.LegacyDownloadUser) + copy(dAtA[i:], m.LegacyDownloadUser) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LegacyDownloadUser))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.CachedDependencies) > 0 { + for iNdEx := len(m.CachedDependencies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CachedDependencies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.ModificationTag != nil { + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if len(m.EgressRules) > 0 { + for iNdEx := len(m.EgressRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EgressRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.MetricsGuid) > 0 { + i -= len(m.MetricsGuid) + copy(dAtA[i:], m.MetricsGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.MetricsGuid))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.LogGuid) > 0 { + i -= len(m.LogGuid) + copy(dAtA[i:], m.LogGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogGuid))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) + copy(dAtA[i:], m.LogSource) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Routes != nil { + { + size := m.Routes.Size() + i -= size + if _, err := m.Routes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x70 + } + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } + if m.CpuWeight != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.CpuWeight)) + i-- + dAtA[i] = 
0x60 + } + if m.MemoryMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x58 + } + if m.DiskMb != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x50 + } + if m.Monitor != nil { + { + size, err := m.Monitor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.DeprecatedStartTimeoutS != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.DeprecatedStartTimeoutS)) + i-- + dAtA[i] = 0x40 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Setup != nil { + { + size, err := m.Setup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.EnvironmentVariables) > 0 { + for iNdEx := len(m.EnvironmentVariables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvironmentVariables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Instances != 0 { + i = encodeVarintDesiredLrp(dAtA, i, uint64(m.Instances)) + i-- + dAtA[i] = 0x20 + } + if len(m.RootFs) > 0 { + i -= len(m.RootFs) + copy(dAtA[i:], m.RootFs) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.RootFs))) + i-- + dAtA[i] = 0x1a + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x12 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrp(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDesiredLrp(dAtA []byte, offset int, v uint64) int { + offset -= sovDesiredLrp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DesiredLRPSchedulingInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DesiredLRPKey.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + l = len(m.Annotation) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Instances != 0 { + n += 1 + sovDesiredLrp(uint64(m.Instances)) + } + l = m.DesiredLRPResource.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + l = m.Routes.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + l = m.ModificationTag.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + if m.VolumePlacement != nil { + l = m.VolumePlacement.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPRunInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DesiredLRPKey.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + if len(m.EnvironmentVariables) > 0 { + for _, e := range m.EnvironmentVariables { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Setup != nil { + l = m.Setup.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + 
sovDesiredLrp(uint64(l)) + } + if m.DeprecatedStartTimeoutS != 0 { + n += 1 + sovDesiredLrp(uint64(m.DeprecatedStartTimeoutS)) + } + if m.Privileged { + n += 2 + } + if m.CpuWeight != 0 { + n += 1 + sovDesiredLrp(uint64(m.CpuWeight)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovDesiredLrp(uint64(e)) + } + } + if len(m.EgressRules) > 0 { + for _, e := range m.EgressRules { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.MetricsGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovDesiredLrp(uint64(m.CreatedAt)) + } + if len(m.CachedDependencies) > 0 { + for _, e := range m.CachedDependencies { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + l = len(m.LegacyDownloadUser) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.TrustedSystemCertificatesPath) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Network != nil { + l = m.Network.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.StartTimeoutMs != 0 { + n += 2 + sovDesiredLrp(uint64(m.StartTimeoutMs)) + } + if m.CertificateProperties != nil { + l = m.CertificateProperties.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImageUsername) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImagePassword) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.CheckDefinition != nil { + l = m.CheckDefinition.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.ImageLayers) > 0 { + for _, e := range m.ImageLayers { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDesiredLrp(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 2 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.LogRateLimit != nil { + l = m.LogRateLimit.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + return n +} + +func (m *ProtoRoutes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Routes) > 0 { + for k, v := range m.Routes { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovDesiredLrp(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DesiredLRPUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OptionalInstances != nil { + n += m.OptionalInstances.Size() + } + if m.Routes != nil { + l = m.Routes.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.OptionalAnnotation != nil { + n += m.OptionalAnnotation.Size() + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDesiredLrp(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DesiredLRPUpdate_Instances) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + 
sovDesiredLrp(uint64(m.Instances)) + return n +} +func (m *DesiredLRPUpdate_Annotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Annotation) + n += 1 + l + sovDesiredLrp(uint64(l)) + return n +} +func (m *DesiredLRPKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.LogGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + return n +} + +func (m *DesiredLRPResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.MemoryMb)) + } + if m.DiskMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.DiskMb)) + } + l = len(m.RootFs) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.MaxPids != 0 { + n += 1 + sovDesiredLrp(uint64(m.MaxPids)) + } + return n +} + +func (m *DesiredLRP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.RootFs) + if l > 0 { + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Instances != 0 { + n += 1 + sovDesiredLrp(uint64(m.Instances)) + } + if len(m.EnvironmentVariables) > 0 { + for _, e := range m.EnvironmentVariables { + l = e.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Setup != nil { + l = m.Setup.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.DeprecatedStartTimeoutS != 0 { + n += 1 + sovDesiredLrp(uint64(m.DeprecatedStartTimeoutS)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + if m.DiskMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.DiskMb)) + } + if m.MemoryMb != 0 { + n += 1 + sovDesiredLrp(uint64(m.MemoryMb)) + } + if m.CpuWeight != 0 { + n += 1 + sovDesiredLrp(uint64(m.CpuWeight)) + } + if m.Privileged { + n += 2 + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovDesiredLrp(uint64(e)) + } + } + if m.Routes != nil { + l = m.Routes.Size() + n += 1 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.LogSource) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.LogGuid) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.MetricsGuid) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.Annotation) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.EgressRules) > 0 { + for _, e := range m.EgressRules { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.ModificationTag != nil { + l = m.ModificationTag.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.CachedDependencies) > 0 { + for _, e := range m.CachedDependencies { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + l = len(m.LegacyDownloadUser) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.TrustedSystemCertificatesPath) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.Network != nil { + l = m.Network.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.StartTimeoutMs != 0 { + n += 2 + sovDesiredLrp(uint64(m.StartTimeoutMs)) + } + if len(m.PlacementTags) > 
0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.MaxPids != 0 { + n += 2 + sovDesiredLrp(uint64(m.MaxPids)) + } + if m.CertificateProperties != nil { + l = m.CertificateProperties.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImageUsername) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + l = len(m.ImagePassword) + if l > 0 { + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if m.CheckDefinition != nil { + l = m.CheckDefinition.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + if len(m.ImageLayers) > 0 { + for _, e := range m.ImageLayers { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDesiredLrp(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDesiredLrp(uint64(len(k))) + l + n += mapEntrySize + 2 + sovDesiredLrp(uint64(mapEntrySize)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + } + if m.LogRateLimit != nil { + l = m.LogRateLimit.Size() + n += 2 + l + sovDesiredLrp(uint64(l)) + } + return n +} + +func sovDesiredLrp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDesiredLrp(x uint64) (n int) { + return sovDesiredLrp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DesiredLRPSchedulingInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPSchedulingInfo{`, + `DesiredLRPKey:` + strings.Replace(strings.Replace(this.DesiredLRPKey.String(), "DesiredLRPKey", "DesiredLRPKey", 1), `&`, ``, 1) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `Instances:` + fmt.Sprintf("%v", this.Instances) + `,`, + `DesiredLRPResource:` + strings.Replace(strings.Replace(this.DesiredLRPResource.String(), "DesiredLRPResource", "DesiredLRPResource", 1), `&`, ``, 1) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `ModificationTag:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1), `&`, ``, 1) + `,`, + `VolumePlacement:` + strings.Replace(fmt.Sprintf("%v", this.VolumePlacement), "VolumePlacement", "VolumePlacement", 1) + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPRunInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironmentVariables := "[]EnvironmentVariable{" + for _, f := range this.EnvironmentVariables { + repeatedStringForEnvironmentVariables += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvironmentVariables += "}" + repeatedStringForEgressRules := "[]SecurityGroupRule{" + for _, f := range this.EgressRules { + repeatedStringForEgressRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEgressRules += "}" + repeatedStringForCachedDependencies := "[]*CachedDependency{" + for _, f := range this.CachedDependencies { + repeatedStringForCachedDependencies += strings.Replace(fmt.Sprintf("%v", f), "CachedDependency", "CachedDependency", 1) + "," + } + repeatedStringForCachedDependencies += "}" + repeatedStringForVolumeMounts := "[]*VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(fmt.Sprintf("%v", f), "VolumeMount", "VolumeMount", 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForImageLayers := "[]*ImageLayer{" + for _, f := 
range this.ImageLayers { + repeatedStringForImageLayers += strings.Replace(fmt.Sprintf("%v", f), "ImageLayer", "ImageLayer", 1) + "," + } + repeatedStringForImageLayers += "}" + repeatedStringForSidecars := "[]*Sidecar{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += strings.Replace(fmt.Sprintf("%v", f), "Sidecar", "Sidecar", 1) + "," + } + repeatedStringForSidecars += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&DesiredLRPRunInfo{`, + `DesiredLRPKey:` + strings.Replace(strings.Replace(this.DesiredLRPKey.String(), "DesiredLRPKey", "DesiredLRPKey", 1), `&`, ``, 1) + `,`, + `EnvironmentVariables:` + repeatedStringForEnvironmentVariables + `,`, + `Setup:` + strings.Replace(fmt.Sprintf("%v", this.Setup), "Action", "Action", 1) + `,`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Action", "Action", 1) + `,`, + `DeprecatedStartTimeoutS:` + fmt.Sprintf("%v", this.DeprecatedStartTimeoutS) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `CpuWeight:` + fmt.Sprintf("%v", this.CpuWeight) + `,`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `EgressRules:` + repeatedStringForEgressRules + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `MetricsGuid:` + fmt.Sprintf("%v", this.MetricsGuid) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `CachedDependencies:` + repeatedStringForCachedDependencies + `,`, + `LegacyDownloadUser:` + fmt.Sprintf("%v", this.LegacyDownloadUser) + `,`, + `TrustedSystemCertificatesPath:` + fmt.Sprintf("%v", this.TrustedSystemCertificatesPath) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `StartTimeoutMs:` + fmt.Sprintf("%v", this.StartTimeoutMs) + `,`, + `CertificateProperties:` + strings.Replace(fmt.Sprintf("%v", this.CertificateProperties), "CertificateProperties", "CertificateProperties", 1) + `,`, + `ImageUsername:` + fmt.Sprintf("%v", this.ImageUsername) + `,`, + `ImagePassword:` + fmt.Sprintf("%v", this.ImagePassword) + `,`, + `CheckDefinition:` + strings.Replace(fmt.Sprintf("%v", this.CheckDefinition), "CheckDefinition", "CheckDefinition", 1) + `,`, + `ImageLayers:` + repeatedStringForImageLayers + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `LogRateLimit:` + strings.Replace(fmt.Sprintf("%v", this.LogRateLimit), "LogRateLimit", "LogRateLimit", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProtoRoutes) String() string { + if this == nil { + return "nil" + } + keysForRoutes := make([]string, 0, len(this.Routes)) + for k, _ := range this.Routes { + keysForRoutes = append(keysForRoutes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRoutes) + mapStringForRoutes := "map[string][]byte{" + for _, k := range keysForRoutes { + mapStringForRoutes += fmt.Sprintf("%v: %v,", k, this.Routes[k]) + } + mapStringForRoutes += "}" + s := strings.Join([]string{`&ProtoRoutes{`, + `Routes:` + 
mapStringForRoutes + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPUpdate) String() string { + if this == nil { + return "nil" + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&DesiredLRPUpdate{`, + `OptionalInstances:` + fmt.Sprintf("%v", this.OptionalInstances) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `OptionalAnnotation:` + fmt.Sprintf("%v", this.OptionalAnnotation) + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPUpdate_Instances) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPUpdate_Instances{`, + `Instances:` + fmt.Sprintf("%v", this.Instances) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPUpdate_Annotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPUpdate_Annotation{`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPKey{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `LogGuid:` + fmt.Sprintf("%v", this.LogGuid) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPResource{`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `RootFs:` + fmt.Sprintf("%v", this.RootFs) + `,`, + `MaxPids:` + fmt.Sprintf("%v", this.MaxPids) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRP) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironmentVariables := "[]*EnvironmentVariable{" + for _, f := range this.EnvironmentVariables { + repeatedStringForEnvironmentVariables += strings.Replace(fmt.Sprintf("%v", f), "EnvironmentVariable", "EnvironmentVariable", 1) + "," + } + repeatedStringForEnvironmentVariables += "}" + repeatedStringForEgressRules := "[]*SecurityGroupRule{" + for _, f := range this.EgressRules { + repeatedStringForEgressRules += strings.Replace(fmt.Sprintf("%v", f), "SecurityGroupRule", "SecurityGroupRule", 1) + "," + } + repeatedStringForEgressRules += "}" + repeatedStringForCachedDependencies := "[]*CachedDependency{" + for _, f := range this.CachedDependencies { + repeatedStringForCachedDependencies += strings.Replace(fmt.Sprintf("%v", f), "CachedDependency", "CachedDependency", 1) + "," + } + repeatedStringForCachedDependencies += "}" + repeatedStringForVolumeMounts := "[]*VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(fmt.Sprintf("%v", f), "VolumeMount", "VolumeMount", 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForImageLayers := "[]*ImageLayer{" + for _, f := range this.ImageLayers { + repeatedStringForImageLayers += strings.Replace(fmt.Sprintf("%v", f), "ImageLayer", "ImageLayer", 1) + "," + } + repeatedStringForImageLayers += "}" + 
repeatedStringForSidecars := "[]*Sidecar{" + for _, f := range this.Sidecars { + repeatedStringForSidecars += strings.Replace(fmt.Sprintf("%v", f), "Sidecar", "Sidecar", 1) + "," + } + repeatedStringForSidecars += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&DesiredLRP{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `RootFs:` + fmt.Sprintf("%v", this.RootFs) + `,`, + `Instances:` + fmt.Sprintf("%v", this.Instances) + `,`, + `EnvironmentVariables:` + repeatedStringForEnvironmentVariables + `,`, + `Setup:` + strings.Replace(fmt.Sprintf("%v", this.Setup), "Action", "Action", 1) + `,`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `DeprecatedStartTimeoutS:` + fmt.Sprintf("%v", this.DeprecatedStartTimeoutS) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Action", "Action", 1) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `CpuWeight:` + fmt.Sprintf("%v", this.CpuWeight) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `Routes:` + fmt.Sprintf("%v", this.Routes) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `LogGuid:` + fmt.Sprintf("%v", this.LogGuid) + `,`, + `MetricsGuid:` + fmt.Sprintf("%v", this.MetricsGuid) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `EgressRules:` + repeatedStringForEgressRules + `,`, + `ModificationTag:` + strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1) + `,`, + `CachedDependencies:` + repeatedStringForCachedDependencies + `,`, + `LegacyDownloadUser:` + fmt.Sprintf("%v", this.LegacyDownloadUser) + `,`, + `TrustedSystemCertificatesPath:` + fmt.Sprintf("%v", this.TrustedSystemCertificatesPath) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `StartTimeoutMs:` + fmt.Sprintf("%v", this.StartTimeoutMs) + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `MaxPids:` + fmt.Sprintf("%v", this.MaxPids) + `,`, + `CertificateProperties:` + strings.Replace(fmt.Sprintf("%v", this.CertificateProperties), "CertificateProperties", "CertificateProperties", 1) + `,`, + `ImageUsername:` + fmt.Sprintf("%v", this.ImageUsername) + `,`, + `ImagePassword:` + fmt.Sprintf("%v", this.ImagePassword) + `,`, + `CheckDefinition:` + strings.Replace(fmt.Sprintf("%v", this.CheckDefinition), "CheckDefinition", "CheckDefinition", 1) + `,`, + `ImageLayers:` + repeatedStringForImageLayers + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `Sidecars:` + repeatedStringForSidecars + `,`, + `LogRateLimit:` + strings.Replace(fmt.Sprintf("%v", this.LogRateLimit), "LogRateLimit", "LogRateLimit", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringDesiredLrp(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + 
return fmt.Sprintf("*%v", pv) +} +func (m *DesiredLRPSchedulingInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DesiredLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + } + m.Instances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Instances |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLRPResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DesiredLRPResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Routes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumePlacement", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumePlacement == nil { + m.VolumePlacement = &VolumePlacement{} + } + if err := m.VolumePlacement.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPRunInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPRunInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPRunInfo: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DesiredLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvironmentVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvironmentVariables = append(m.EnvironmentVariables, EnvironmentVariable{}) + if err := m.EnvironmentVariables[len(m.EnvironmentVariables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Setup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Setup == nil { + m.Setup = &Action{} + } + if err := m.Setup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &Action{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedStartTimeoutS", wireType) + } + m.DeprecatedStartTimeoutS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedStartTimeoutS |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuWeight", wireType) + } + m.CpuWeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuWeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressRules = append(m.EgressRules, SecurityGroupRule{}) + if err := 
m.EgressRules[len(m.EgressRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachedDependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CachedDependencies = append(m.CachedDependencies, &CachedDependency{}) + if err := m.CachedDependencies[len(m.CachedDependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyDownloadUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LegacyDownloadUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedSystemCertificatesPath", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustedSystemCertificatesPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeoutMs", wireType) + } + m.StartTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimeoutMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertificateProperties == nil { + m.CertificateProperties = &CertificateProperties{} + } + if err := m.CertificateProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckDefinition == nil { + m.CheckDefinition = &CheckDefinition{} + } + if err := m.CheckDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLayers = append(m.ImageLayers, &ImageLayer{}) + if err := m.ImageLayers[len(m.ImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sidecars = append(m.Sidecars, &Sidecar{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogRateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogRateLimit == nil { + m.LogRateLimit = &LogRateLimit{} + } + if err := m.LogRateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProtoRoutes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtoRoutes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtoRoutes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Routes == nil { + m.Routes = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Routes[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OptionalInstances = &DesiredLRPUpdate_Instances{v} + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Routes == nil { + m.Routes = &Routes{} + } + if err := m.Routes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OptionalAnnotation = &DesiredLRPUpdate_Annotation{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootFs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootFs = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPids", wireType) + } + m.MaxPids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPids |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootFs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootFs = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) + } + m.Instances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Instances |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvironmentVariables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvironmentVariables = append(m.EnvironmentVariables, &EnvironmentVariable{}) + if err := m.EnvironmentVariables[len(m.EnvironmentVariables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Setup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Setup == nil { + m.Setup = &Action{} + } + if err := m.Setup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedStartTimeoutS", wireType) + } + m.DeprecatedStartTimeoutS = 0 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedStartTimeoutS |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &Action{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuWeight", wireType) + } + m.CpuWeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuWeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 14: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]uint32, 0, elementCount) + } + for 
iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Routes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Routes == nil { + m.Routes = &Routes{} + } + if err := m.Routes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressRules = append(m.EgressRules, &SecurityGroupRule{}) + if err := m.EgressRules[len(m.EgressRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ModificationTag == nil { + m.ModificationTag = &ModificationTag{} + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachedDependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CachedDependencies = append(m.CachedDependencies, &CachedDependency{}) + if err := m.CachedDependencies[len(m.CachedDependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyDownloadUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LegacyDownloadUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
TrustedSystemCertificatesPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustedSystemCertificatesPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeoutMs", wireType) + } + m.StartTimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimeoutMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPids", wireType) + } + m.MaxPids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPids |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertificateProperties == nil { + m.CertificateProperties = &CertificateProperties{} + } + if err := m.CertificateProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckDefinition == nil { + m.CheckDefinition = &CheckDefinition{} + } + if err := m.CheckDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLayers = append(m.ImageLayers, &ImageLayer{}) + if err := m.ImageLayers[len(m.ImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDesiredLrp + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sidecars = append(m.Sidecars, &Sidecar{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogRateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogRateLimit == nil { + m.LogRateLimit = &LogRateLimit{} + } + if err := m.LogRateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrp(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDesiredLrp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDesiredLrp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDesiredLrp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDesiredLrp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDesiredLrp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDesiredLrp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDesiredLrp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto new file mode 100644 index 00000000..d5697c9a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp.proto @@ -0,0 +1,150 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actions.proto"; +import "cached_dependency.proto"; +import "certificate_properties.proto"; +import "environment_variables.proto"; +import 
"modification_tag.proto"; +import "network.proto"; +import "security_group.proto"; +import "volume_mount.proto"; +import "check_definition.proto"; +import "image_layer.proto"; +import "metric_tags.proto"; +import "sidecar.proto"; +import "log_rate_limit.proto"; + +message DesiredLRPSchedulingInfo { + DesiredLRPKey desired_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + string annotation = 2 [(gogoproto.jsontag) = "annotation"]; + int32 instances = 3 [(gogoproto.jsontag) = "instances"]; + + DesiredLRPResource desired_lrp_resource = 4 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + ProtoRoutes routes = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "Routes"]; + ModificationTag modification_tag = 6 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + VolumePlacement volume_placement = 7; + repeated string PlacementTags = 8 [(gogoproto.jsontag) ="placement_tags,omitempty"]; +} + +message DesiredLRPRunInfo { + DesiredLRPKey desired_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + repeated EnvironmentVariable environment_variables = 2 [(gogoproto.jsontag) = "env", (gogoproto.nullable) = false]; + Action setup = 3; + Action action = 4; + Action monitor = 5; + + uint32 deprecated_start_timeout_s = 6 [(gogoproto.jsontag) = "start_timeout,omitempty", deprecated=true]; + + bool privileged = 7 [(gogoproto.jsontag) = "privileged"]; + + uint32 cpu_weight = 8 [(gogoproto.jsontag) = "cpu_weight"]; + repeated uint32 ports = 9 [packed = false]; + repeated SecurityGroupRule egress_rules = 10 [(gogoproto.nullable) = false]; + string log_source = 11 [(gogoproto.jsontag) = "log_source"]; + string metrics_guid = 12 [deprecated=true, (gogoproto.jsontag) = "metrics_guid"]; + int64 created_at = 13 [(gogoproto.jsontag) = "created_at"]; + repeated CachedDependency cached_dependencies = 14; + string legacy_download_user = 15 [deprecated=true]; + string trusted_system_certificates_path = 16; + repeated VolumeMount volume_mounts = 17; + Network network = 18; + + int64 start_timeout_ms = 19 [(gogoproto.jsontag) = "start_timeout_ms"]; + + CertificateProperties certificate_properties = 20 [(gogoproto.nullable) = true]; + + string image_username = 21; + string image_password = 22; + + CheckDefinition check_definition = 23; + + repeated ImageLayer image_layers = 24; + + map metric_tags = 25; + + repeated Sidecar sidecars = 26; + LogRateLimit log_rate_limit = 27; +} + +// helper message for marshalling routes +message ProtoRoutes { + map routes = 1; +} + +message DesiredLRPUpdate { + oneof optional_instances { + int32 instances = 1; + } + ProtoRoutes routes = 2 [(gogoproto.nullable) = true, (gogoproto.customtype) = "Routes"]; + oneof optional_annotation { + string annotation = 3; + } + map metric_tags = 4; +} + +message DesiredLRPKey { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + string domain = 2 [(gogoproto.jsontag) = "domain"]; + string log_guid = 3 [(gogoproto.jsontag) = "log_guid"]; +} + +message DesiredLRPResource { + int32 memory_mb = 1 [(gogoproto.jsontag) = "memory_mb"]; + int32 disk_mb = 2 [(gogoproto.jsontag) = "disk_mb"]; + string root_fs = 3 [(gogoproto.jsontag) = "rootfs"]; + int32 max_pids = 4 [(gogoproto.jsontag) = "max_pids"]; +} + +message DesiredLRP { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + string domain = 2 [(gogoproto.jsontag) = "domain"]; + string root_fs = 3 
[(gogoproto.jsontag) = "rootfs"]; + int32 instances = 4 [(gogoproto.jsontag) = "instances"]; + repeated EnvironmentVariable environment_variables = 5 [(gogoproto.jsontag) = "env"]; + Action setup = 6; + Action action = 7; + + int64 start_timeout_ms = 27 [(gogoproto.jsontag) = "start_timeout_ms"]; + uint32 deprecated_start_timeout_s = 8 [(gogoproto.jsontag) = "deprecated_timeout_ns,omitempty", deprecated=true]; + + Action monitor = 9; + int32 disk_mb = 10 [(gogoproto.jsontag) = "disk_mb"]; + int32 memory_mb = 11 [(gogoproto.jsontag) = "memory_mb"]; + uint32 cpu_weight = 12 [(gogoproto.jsontag) = "cpu_weight"]; + bool privileged = 13 [(gogoproto.jsontag) = "privileged"]; + repeated uint32 ports = 14 [packed = false]; + ProtoRoutes routes = 15 [(gogoproto.nullable) = true, (gogoproto.customtype) = "Routes"]; + string log_source = 16 [(gogoproto.jsontag) = "log_source"]; + string log_guid = 17 [(gogoproto.jsontag) = "log_guid"]; + string metrics_guid = 18 [deprecated=true, (gogoproto.jsontag) = "metrics_guid"]; + string annotation = 19 [(gogoproto.jsontag) = "annotation"]; + repeated SecurityGroupRule egress_rules = 20; + ModificationTag modification_tag = 21; + repeated CachedDependency cached_dependencies = 22; + string legacy_download_user = 23 [deprecated=true]; + string trusted_system_certificates_path = 24; + repeated VolumeMount volume_mounts = 25; + Network network = 26; + repeated string PlacementTags = 28 [(gogoproto.jsontag) ="placement_tags,omitempty"]; + int32 max_pids = 29 [(gogoproto.jsontag) = "max_pids"]; + + CertificateProperties certificate_properties = 30 [(gogoproto.nullable) = true]; + + string image_username = 31; + string image_password = 32; + + CheckDefinition check_definition = 33; + + repeated ImageLayer image_layers = 34; + + map metric_tags = 35; + + repeated Sidecar sidecars = 36; + LogRateLimit log_rate_limit = 37; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go new file mode 100644 index 00000000..92030fec --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.go @@ -0,0 +1,69 @@ +package models + +func (request *DesiredLRPsRequest) Validate() error { + return nil +} + +func (request *DesiredLRPByProcessGuidRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *DesireLRPRequest) Validate() error { + var validationError ValidationError + + if request.DesiredLrp == nil { + validationError = validationError.Append(ErrInvalidField{"desired_lrp"}) + } else if err := request.DesiredLrp.Validate(); err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *UpdateDesiredLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = validationError.Append(ErrInvalidField{"process_guid"}) + } + + if request.Update != nil { + if err := request.Update.Validate(); err != nil { + validationError = validationError.Append(err) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *RemoveDesiredLRPRequest) Validate() error { + var validationError ValidationError + + if request.ProcessGuid == "" { + validationError = 
validationError.Append(ErrInvalidField{"process_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go new file mode 100644 index 00000000..703679ff --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.pb.go @@ -0,0 +1,2806 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: desired_lrp_requests.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DesiredLRPLifecycleResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *DesiredLRPLifecycleResponse) Reset() { *m = DesiredLRPLifecycleResponse{} } +func (*DesiredLRPLifecycleResponse) ProtoMessage() {} +func (*DesiredLRPLifecycleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{0} +} +func (m *DesiredLRPLifecycleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPLifecycleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPLifecycleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPLifecycleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPLifecycleResponse.Merge(m, src) +} +func (m *DesiredLRPLifecycleResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPLifecycleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPLifecycleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPLifecycleResponse proto.InternalMessageInfo + +func (m *DesiredLRPLifecycleResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type DesiredLRPsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrps []*DesiredLRP `protobuf:"bytes,2,rep,name=desired_lrps,json=desiredLrps,proto3" json:"desired_lrps,omitempty"` +} + +func (m *DesiredLRPsResponse) Reset() { *m = DesiredLRPsResponse{} } +func (*DesiredLRPsResponse) ProtoMessage() {} +func (*DesiredLRPsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{1} +} +func (m *DesiredLRPsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPsResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DesiredLRPsResponse.Merge(m, src) +} +func (m *DesiredLRPsResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPsResponse proto.InternalMessageInfo + +func (m *DesiredLRPsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPsResponse) GetDesiredLrps() []*DesiredLRP { + if m != nil { + return m.DesiredLrps + } + return nil +} + +type DesiredLRPsRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + ProcessGuids []string `protobuf:"bytes,2,rep,name=process_guids,json=processGuids,proto3" json:"process_guids,omitempty"` +} + +func (m *DesiredLRPsRequest) Reset() { *m = DesiredLRPsRequest{} } +func (*DesiredLRPsRequest) ProtoMessage() {} +func (*DesiredLRPsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{2} +} +func (m *DesiredLRPsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPsRequest.Merge(m, src) +} +func (m *DesiredLRPsRequest) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPsRequest proto.InternalMessageInfo + +func (m *DesiredLRPsRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *DesiredLRPsRequest) GetProcessGuids() []string { + if m != nil { + return m.ProcessGuids + } + return nil +} + +type DesiredLRPResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrp *DesiredLRP `protobuf:"bytes,2,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` +} + +func (m *DesiredLRPResponse) Reset() { *m = DesiredLRPResponse{} } +func (*DesiredLRPResponse) ProtoMessage() {} +func (*DesiredLRPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{3} +} +func (m *DesiredLRPResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPResponse.Merge(m, src) +} +func (m *DesiredLRPResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPResponse proto.InternalMessageInfo + +func (m *DesiredLRPResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPResponse) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +type DesiredLRPSchedulingInfosResponse struct { + Error *Error 
`protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrpSchedulingInfos []*DesiredLRPSchedulingInfo `protobuf:"bytes,2,rep,name=desired_lrp_scheduling_infos,json=desiredLrpSchedulingInfos,proto3" json:"desired_lrp_scheduling_infos,omitempty"` +} + +func (m *DesiredLRPSchedulingInfosResponse) Reset() { *m = DesiredLRPSchedulingInfosResponse{} } +func (*DesiredLRPSchedulingInfosResponse) ProtoMessage() {} +func (*DesiredLRPSchedulingInfosResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{4} +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPSchedulingInfosResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPSchedulingInfosResponse.Merge(m, src) +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPSchedulingInfosResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPSchedulingInfosResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPSchedulingInfosResponse proto.InternalMessageInfo + +func (m *DesiredLRPSchedulingInfosResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DesiredLRPSchedulingInfosResponse) GetDesiredLrpSchedulingInfos() []*DesiredLRPSchedulingInfo { + if m != nil { + return m.DesiredLrpSchedulingInfos + } + return nil +} + +type DesiredLRPSchedulingInfoByProcessGuidResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DesiredLrpSchedulingInfo *DesiredLRPSchedulingInfo `protobuf:"bytes,2,opt,name=desired_lrp_scheduling_info,json=desiredLrpSchedulingInfo,proto3" json:"desired_lrp_scheduling_info,omitempty"` +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Reset() { + *m = DesiredLRPSchedulingInfoByProcessGuidResponse{} +} +func (*DesiredLRPSchedulingInfoByProcessGuidResponse) ProtoMessage() {} +func (*DesiredLRPSchedulingInfoByProcessGuidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{5} +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse.Merge(m, src) +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPSchedulingInfoByProcessGuidResponse proto.InternalMessageInfo + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) GetError() *Error { + if m != nil { + 
return m.Error + } + return nil +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) GetDesiredLrpSchedulingInfo() *DesiredLRPSchedulingInfo { + if m != nil { + return m.DesiredLrpSchedulingInfo + } + return nil +} + +type DesiredLRPByProcessGuidRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` +} + +func (m *DesiredLRPByProcessGuidRequest) Reset() { *m = DesiredLRPByProcessGuidRequest{} } +func (*DesiredLRPByProcessGuidRequest) ProtoMessage() {} +func (*DesiredLRPByProcessGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{6} +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPByProcessGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPByProcessGuidRequest.Merge(m, src) +} +func (m *DesiredLRPByProcessGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPByProcessGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPByProcessGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPByProcessGuidRequest proto.InternalMessageInfo + +func (m *DesiredLRPByProcessGuidRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +type DesireLRPRequest struct { + DesiredLrp *DesiredLRP `protobuf:"bytes,1,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` +} + +func (m *DesireLRPRequest) Reset() { *m = DesireLRPRequest{} } +func (*DesireLRPRequest) ProtoMessage() {} +func (*DesireLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{7} +} +func (m *DesireLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesireLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesireLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesireLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesireLRPRequest.Merge(m, src) +} +func (m *DesireLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *DesireLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesireLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesireLRPRequest proto.InternalMessageInfo + +func (m *DesireLRPRequest) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +type UpdateDesiredLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` + Update *DesiredLRPUpdate `protobuf:"bytes,2,opt,name=update,proto3" json:"update,omitempty"` +} + +func (m *UpdateDesiredLRPRequest) Reset() { *m = UpdateDesiredLRPRequest{} } +func (*UpdateDesiredLRPRequest) ProtoMessage() {} +func (*UpdateDesiredLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{8} +} +func (m *UpdateDesiredLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateDesiredLRPRequest) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateDesiredLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateDesiredLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateDesiredLRPRequest.Merge(m, src) +} +func (m *UpdateDesiredLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateDesiredLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateDesiredLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateDesiredLRPRequest proto.InternalMessageInfo + +func (m *UpdateDesiredLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func (m *UpdateDesiredLRPRequest) GetUpdate() *DesiredLRPUpdate { + if m != nil { + return m.Update + } + return nil +} + +type RemoveDesiredLRPRequest struct { + ProcessGuid string `protobuf:"bytes,1,opt,name=process_guid,json=processGuid,proto3" json:"process_guid"` +} + +func (m *RemoveDesiredLRPRequest) Reset() { *m = RemoveDesiredLRPRequest{} } +func (*RemoveDesiredLRPRequest) ProtoMessage() {} +func (*RemoveDesiredLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7235cc1a84e38c85, []int{9} +} +func (m *RemoveDesiredLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveDesiredLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveDesiredLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveDesiredLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveDesiredLRPRequest.Merge(m, src) +} +func (m *RemoveDesiredLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveDesiredLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveDesiredLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveDesiredLRPRequest proto.InternalMessageInfo + +func (m *RemoveDesiredLRPRequest) GetProcessGuid() string { + if m != nil { + return m.ProcessGuid + } + return "" +} + +func init() { + proto.RegisterType((*DesiredLRPLifecycleResponse)(nil), "models.DesiredLRPLifecycleResponse") + proto.RegisterType((*DesiredLRPsResponse)(nil), "models.DesiredLRPsResponse") + proto.RegisterType((*DesiredLRPsRequest)(nil), "models.DesiredLRPsRequest") + proto.RegisterType((*DesiredLRPResponse)(nil), "models.DesiredLRPResponse") + proto.RegisterType((*DesiredLRPSchedulingInfosResponse)(nil), "models.DesiredLRPSchedulingInfosResponse") + proto.RegisterType((*DesiredLRPSchedulingInfoByProcessGuidResponse)(nil), "models.DesiredLRPSchedulingInfoByProcessGuidResponse") + proto.RegisterType((*DesiredLRPByProcessGuidRequest)(nil), "models.DesiredLRPByProcessGuidRequest") + proto.RegisterType((*DesireLRPRequest)(nil), "models.DesireLRPRequest") + proto.RegisterType((*UpdateDesiredLRPRequest)(nil), "models.UpdateDesiredLRPRequest") + proto.RegisterType((*RemoveDesiredLRPRequest)(nil), "models.RemoveDesiredLRPRequest") +} + +func init() { proto.RegisterFile("desired_lrp_requests.proto", fileDescriptor_7235cc1a84e38c85) } + +var fileDescriptor_7235cc1a84e38c85 = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4f, 0x6b, 0x13, 0x41, + 0x18, 0xc6, 0x77, 0x2a, 0x06, 0xfa, 0x6e, 0x0a, 0x75, 0x3c, 
0x74, 0x4d, 0x65, 0x1a, 0xa7, 0x97, + 0x5e, 0x9a, 0x4a, 0xa3, 0x5f, 0x20, 0x28, 0x45, 0x08, 0x52, 0x46, 0x7a, 0x94, 0x25, 0xd9, 0x9d, + 0x6c, 0x17, 0x92, 0x9d, 0xed, 0x4c, 0x56, 0xe8, 0xad, 0x1f, 0xc1, 0x8f, 0x21, 0x78, 0xf6, 0x3b, + 0x78, 0xcc, 0xb1, 0xa7, 0x62, 0x36, 0x17, 0xe9, 0xa9, 0x1f, 0x41, 0x32, 0x33, 0x75, 0x27, 0xad, + 0x51, 0x23, 0x9e, 0x92, 0x79, 0xff, 0x3c, 0xef, 0xef, 0x7d, 0x79, 0x58, 0x68, 0xc4, 0x5c, 0xa5, + 0x92, 0xc7, 0xe1, 0x50, 0xe6, 0xa1, 0xe4, 0x67, 0x05, 0x57, 0x63, 0xd5, 0xca, 0xa5, 0x18, 0x0b, + 0x5c, 0x1b, 0x89, 0x98, 0x0f, 0x55, 0x63, 0x3f, 0x49, 0xc7, 0xa7, 0x45, 0xbf, 0x15, 0x89, 0xd1, + 0x41, 0x22, 0x12, 0x71, 0xa0, 0xd3, 0xfd, 0x62, 0xa0, 0x5f, 0xfa, 0xa1, 0xff, 0x99, 0xb6, 0xc6, + 0x23, 0x47, 0xd2, 0x86, 0x7c, 0x2e, 0xa5, 0x90, 0xe6, 0x41, 0x3b, 0xb0, 0xfd, 0xca, 0x54, 0x74, + 0xd9, 0x71, 0x37, 0x1d, 0xf0, 0xe8, 0x3c, 0x1a, 0x72, 0xc6, 0x55, 0x2e, 0x32, 0xc5, 0xf1, 0x2e, + 0x3c, 0xd4, 0xd5, 0x01, 0x6a, 0xa2, 0x3d, 0xff, 0x70, 0xa3, 0x65, 0x28, 0x5a, 0xaf, 0xe7, 0x41, + 0x66, 0x72, 0xf4, 0x0c, 0x1e, 0x57, 0x1a, 0x6a, 0xa5, 0x5e, 0xfc, 0x12, 0xea, 0x0e, 0xa1, 0x0a, + 0xd6, 0x9a, 0x0f, 0xf6, 0xfc, 0x43, 0x7c, 0x5b, 0x5b, 0xe9, 0x32, 0xdf, 0xd6, 0x75, 0x65, 0xae, + 0xe8, 0x7b, 0xc0, 0x0b, 0x23, 0xf5, 0xa9, 0x30, 0x85, 0x5a, 0x2c, 0x46, 0xbd, 0x34, 0xd3, 0x23, + 0xd7, 0x3b, 0x70, 0x7d, 0xb5, 0x63, 0x23, 0xcc, 0xfe, 0xe2, 0x5d, 0xd8, 0xc8, 0xa5, 0x88, 0xb8, + 0x52, 0x61, 0x52, 0xa4, 0xb1, 0x99, 0xb8, 0xce, 0xea, 0x36, 0x78, 0x34, 0x8f, 0xd1, 0xcc, 0x95, + 0x5f, 0x6d, 0xa1, 0x36, 0xf8, 0xce, 0x42, 0xc1, 0x9a, 0x2e, 0xfd, 0xd5, 0x3e, 0x50, 0xed, 0x43, + 0x3f, 0x23, 0x78, 0x56, 0xa5, 0xde, 0x45, 0xa7, 0x3c, 0x2e, 0x86, 0x69, 0x96, 0xbc, 0xc9, 0x06, + 0x62, 0xc5, 0x83, 0xf6, 0xe0, 0xa9, 0xeb, 0x22, 0xf5, 0x53, 0x2b, 0x4c, 0xe7, 0x62, 0xf6, 0xc0, + 0xcd, 0xfb, 0x40, 0x8b, 0x53, 0xd9, 0x93, 0x0a, 0xef, 0x0e, 0x0f, 0xfd, 0x82, 0x60, 0x7f, 0x59, + 0x5f, 0xe7, 0xfc, 0xb8, 0x3a, 0xe4, 0x6a, 0xe4, 0x21, 0x6c, 0xff, 0x86, 0xdc, 0x5e, 0xf2, 0xcf, + 0xe0, 0xc1, 0x32, 0x70, 0x7a, 0x02, 0xa4, 0xea, 0xba, 0x03, 0x6a, 0x0c, 0xd4, 0x86, 0xba, 0x6b, + 0x0e, 0x6b, 0xa3, 0xcd, 0xeb, 0xab, 0x9d, 0x85, 0x38, 0xf3, 0x1d, 0xb7, 0xd0, 0x23, 0xd8, 0x34, + 0xb2, 0xda, 0x2b, 0xb7, 0x42, 0x0b, 0x2e, 0x40, 0x7f, 0xe5, 0x82, 0x0b, 0x04, 0x5b, 0x27, 0x79, + 0xdc, 0x1b, 0x73, 0xd7, 0x7c, 0xff, 0x4e, 0x86, 0x9f, 0x43, 0xad, 0xd0, 0x7a, 0xf6, 0x78, 0xc1, + 0x7d, 0x00, 0x33, 0x8f, 0xd9, 0x3a, 0xfa, 0x16, 0xb6, 0x18, 0x1f, 0x89, 0x0f, 0xff, 0x89, 0xa0, + 0xf3, 0x62, 0x32, 0x25, 0xde, 0xe5, 0x94, 0x78, 0x37, 0x53, 0x82, 0x2e, 0x4a, 0x82, 0x3e, 0x95, + 0x04, 0x7d, 0x2d, 0x09, 0x9a, 0x94, 0x04, 0x7d, 0x2b, 0x09, 0xfa, 0x5e, 0x12, 0xef, 0xa6, 0x24, + 0xe8, 0xe3, 0x8c, 0x78, 0x93, 0x19, 0xf1, 0x2e, 0x67, 0xc4, 0xeb, 0xd7, 0xf4, 0xb7, 0xa9, 0xfd, + 0x23, 0x00, 0x00, 0xff, 0xff, 0xca, 0x08, 0x89, 0xe8, 0x10, 0x05, 0x00, 0x00, +} + +func (this *DesiredLRPLifecycleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPLifecycleResponse) + if !ok { + that2, ok := that.(DesiredLRPLifecycleResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *DesiredLRPsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPsResponse) + if !ok { + that2, ok := that.(DesiredLRPsResponse) + if ok { + that1 = &that2 + } 
else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.DesiredLrps) != len(that1.DesiredLrps) { + return false + } + for i := range this.DesiredLrps { + if !this.DesiredLrps[i].Equal(that1.DesiredLrps[i]) { + return false + } + } + return true +} +func (this *DesiredLRPsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPsRequest) + if !ok { + that2, ok := that.(DesiredLRPsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if len(this.ProcessGuids) != len(that1.ProcessGuids) { + return false + } + for i := range this.ProcessGuids { + if this.ProcessGuids[i] != that1.ProcessGuids[i] { + return false + } + } + return true +} +func (this *DesiredLRPResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPResponse) + if !ok { + that2, ok := that.(DesiredLRPResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + return true +} +func (this *DesiredLRPSchedulingInfosResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPSchedulingInfosResponse) + if !ok { + that2, ok := that.(DesiredLRPSchedulingInfosResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.DesiredLrpSchedulingInfos) != len(that1.DesiredLrpSchedulingInfos) { + return false + } + for i := range this.DesiredLrpSchedulingInfos { + if !this.DesiredLrpSchedulingInfos[i].Equal(that1.DesiredLrpSchedulingInfos[i]) { + return false + } + } + return true +} +func (this *DesiredLRPSchedulingInfoByProcessGuidResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPSchedulingInfoByProcessGuidResponse) + if !ok { + that2, ok := that.(DesiredLRPSchedulingInfoByProcessGuidResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.DesiredLrpSchedulingInfo.Equal(that1.DesiredLrpSchedulingInfo) { + return false + } + return true +} +func (this *DesiredLRPByProcessGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPByProcessGuidRequest) + if !ok { + that2, ok := that.(DesiredLRPByProcessGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + return true +} +func (this *DesireLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesireLRPRequest) + if !ok { + that2, ok := that.(DesireLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == 
nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + return true +} +func (this *UpdateDesiredLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UpdateDesiredLRPRequest) + if !ok { + that2, ok := that.(UpdateDesiredLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + if !this.Update.Equal(that1.Update) { + return false + } + return true +} +func (this *RemoveDesiredLRPRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RemoveDesiredLRPRequest) + if !ok { + that2, ok := that.(RemoveDesiredLRPRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ProcessGuid != that1.ProcessGuid { + return false + } + return true +} +func (this *DesiredLRPLifecycleResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.DesiredLRPLifecycleResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrps != nil { + s = append(s, "DesiredLrps: "+fmt.Sprintf("%#v", this.DesiredLrps)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPsRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "ProcessGuids: "+fmt.Sprintf("%#v", this.ProcessGuids)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPSchedulingInfosResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPSchedulingInfosResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrpSchedulingInfos != nil { + s = append(s, "DesiredLrpSchedulingInfos: "+fmt.Sprintf("%#v", this.DesiredLrpSchedulingInfos)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPSchedulingInfoByProcessGuidResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPSchedulingInfoByProcessGuidResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.DesiredLrpSchedulingInfo != nil { + s = append(s, "DesiredLrpSchedulingInfo: 
"+fmt.Sprintf("%#v", this.DesiredLrpSchedulingInfo)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPByProcessGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.DesiredLRPByProcessGuidRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesireLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.DesireLRPRequest{") + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UpdateDesiredLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.UpdateDesiredLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + if this.Update != nil { + s = append(s, "Update: "+fmt.Sprintf("%#v", this.Update)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveDesiredLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.RemoveDesiredLRPRequest{") + s = append(s, "ProcessGuid: "+fmt.Sprintf("%#v", this.ProcessGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDesiredLrpRequests(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DesiredLRPLifecycleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPLifecycleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPLifecycleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DesiredLrps) > 0 { + for iNdEx := len(m.DesiredLrps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DesiredLrps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
(m *DesiredLRPsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuids) > 0 { + for iNdEx := len(m.ProcessGuids) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProcessGuids[iNdEx]) + copy(dAtA[i:], m.ProcessGuids[iNdEx]) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuids[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPSchedulingInfosResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPSchedulingInfosResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPSchedulingInfosResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DesiredLrpSchedulingInfos) > 0 { + for iNdEx := len(m.DesiredLrpSchedulingInfos) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DesiredLrpSchedulingInfos[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*DesiredLRPSchedulingInfoByProcessGuidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DesiredLrpSchedulingInfo != nil { + { + size, err := m.DesiredLrpSchedulingInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPByProcessGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPByProcessGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPByProcessGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesireLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesireLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesireLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateDesiredLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateDesiredLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateDesiredLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Update != nil { + { + size, err := m.Update.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveDesiredLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveDesiredLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveDesiredLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProcessGuid) > 0 { + i -= len(m.ProcessGuid) + copy(dAtA[i:], m.ProcessGuid) + i = encodeVarintDesiredLrpRequests(dAtA, i, uint64(len(m.ProcessGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDesiredLrpRequests(dAtA []byte, offset int, v uint64) int { + offset -= sovDesiredLrpRequests(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DesiredLRPLifecycleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesiredLRPsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if len(m.DesiredLrps) > 0 { + for _, e := range m.DesiredLrps { + l = e.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if len(m.ProcessGuids) > 0 { + for _, s := range m.ProcessGuids { + l = len(s) + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesiredLRPSchedulingInfosResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if len(m.DesiredLrpSchedulingInfos) > 0 { + for _, e := range m.DesiredLrpSchedulingInfos { + l = e.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + } + return n +} + +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if m.DesiredLrpSchedulingInfo != nil { + l = m.DesiredLrpSchedulingInfo.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesiredLRPByProcessGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *DesireLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *UpdateDesiredLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + if m.Update != nil { + l = m.Update.Size() + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func (m *RemoveDesiredLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessGuid) + if l > 0 { + n += 1 + l + sovDesiredLrpRequests(uint64(l)) + } + return n +} + +func sovDesiredLrpRequests(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozDesiredLrpRequests(x uint64) (n int) { + return sovDesiredLrpRequests(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DesiredLRPLifecycleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPLifecycleResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForDesiredLrps := "[]*DesiredLRP{" + for _, f := range this.DesiredLrps { + repeatedStringForDesiredLrps += strings.Replace(fmt.Sprintf("%v", f), "DesiredLRP", "DesiredLRP", 1) + "," + } + repeatedStringForDesiredLrps += "}" + s := strings.Join([]string{`&DesiredLRPsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrps:` + repeatedStringForDesiredLrps + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPsRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `ProcessGuids:` + fmt.Sprintf("%v", this.ProcessGuids) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPSchedulingInfosResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForDesiredLrpSchedulingInfos := "[]*DesiredLRPSchedulingInfo{" + for _, f := range this.DesiredLrpSchedulingInfos { + repeatedStringForDesiredLrpSchedulingInfos += strings.Replace(fmt.Sprintf("%v", f), "DesiredLRPSchedulingInfo", "DesiredLRPSchedulingInfo", 1) + "," + } + repeatedStringForDesiredLrpSchedulingInfos += "}" + s := strings.Join([]string{`&DesiredLRPSchedulingInfosResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrpSchedulingInfos:` + repeatedStringForDesiredLrpSchedulingInfos + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPSchedulingInfoByProcessGuidResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPSchedulingInfoByProcessGuidResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `DesiredLrpSchedulingInfo:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrpSchedulingInfo), "DesiredLRPSchedulingInfo", "DesiredLRPSchedulingInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPByProcessGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPByProcessGuidRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `}`, + }, "") + return s +} +func (this *DesireLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesireLRPRequest{`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateDesiredLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateDesiredLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + 
`Update:` + strings.Replace(fmt.Sprintf("%v", this.Update), "DesiredLRPUpdate", "DesiredLRPUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveDesiredLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveDesiredLRPRequest{`, + `ProcessGuid:` + fmt.Sprintf("%v", this.ProcessGuid) + `,`, + `}`, + }, "") + return s +} +func valueToStringDesiredLrpRequests(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DesiredLRPLifecycleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPLifecycleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPLifecycleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredLrps = append(m.DesiredLrps, &DesiredLRP{}) + if err := m.DesiredLrps[len(m.DesiredLrps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuids", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.ProcessGuids = append(m.ProcessGuids, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPSchedulingInfosResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfosResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrpSchedulingInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredLrpSchedulingInfos = append(m.DesiredLrpSchedulingInfos, &DesiredLRPSchedulingInfo{}) + if err := m.DesiredLrpSchedulingInfos[len(m.DesiredLrpSchedulingInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPSchedulingInfoByProcessGuidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfoByProcessGuidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPSchedulingInfoByProcessGuidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrpSchedulingInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrpSchedulingInfo == nil { + m.DesiredLrpSchedulingInfo = &DesiredLRPSchedulingInfo{} + } + if err := m.DesiredLrpSchedulingInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPByProcessGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPByProcessGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPByProcessGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesireLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesireLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesireLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateDesiredLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateDesiredLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateDesiredLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Update == nil { + m.Update = &DesiredLRPUpdate{} + } + if err := m.Update.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveDesiredLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveDesiredLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveDesiredLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProcessGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDesiredLrpRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDesiredLrpRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDesiredLrpRequests(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDesiredLrpRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDesiredLrpRequests + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 
{ + return 0, ErrUnexpectedEndOfGroupDesiredLrpRequests + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDesiredLrpRequests + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDesiredLrpRequests = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDesiredLrpRequests = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDesiredLrpRequests = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto new file mode 100644 index 00000000..be746255 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/desired_lrp_requests.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "desired_lrp.proto"; +import "error.proto"; + +message DesiredLRPLifecycleResponse { + Error error = 1; +} + +message DesiredLRPsResponse { + Error error = 1; + repeated DesiredLRP desired_lrps = 2; +} + +message DesiredLRPsRequest { + string domain = 1 [(gogoproto.jsontag) = "domain"]; + repeated string process_guids = 2; +} + +message DesiredLRPResponse { + Error error = 1; + DesiredLRP desired_lrp = 2; +} + +message DesiredLRPSchedulingInfosResponse { + Error error = 1; + repeated DesiredLRPSchedulingInfo desired_lrp_scheduling_infos = 2; +} + +message DesiredLRPSchedulingInfoByProcessGuidResponse { + Error error = 1; + DesiredLRPSchedulingInfo desired_lrp_scheduling_info = 2; +} + +message DesiredLRPByProcessGuidRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; +} + +message DesireLRPRequest { + DesiredLRP desired_lrp = 1; +} + +message UpdateDesiredLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; + DesiredLRPUpdate update = 2; +} + +message RemoveDesiredLRPRequest { + string process_guid = 1 [(gogoproto.jsontag) = "process_guid"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/domain.pb.go b/vendor/code.cloudfoundry.org/bbs/models/domain.pb.go new file mode 100644 index 00000000..08777240 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/domain.pb.go @@ -0,0 +1,853 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: domain.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DomainsResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` +} + +func (m *DomainsResponse) Reset() { *m = DomainsResponse{} } +func (*DomainsResponse) ProtoMessage() {} +func (*DomainsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_73e6234e76dbdb84, []int{0} +} +func (m *DomainsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DomainsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DomainsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DomainsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DomainsResponse.Merge(m, src) +} +func (m *DomainsResponse) XXX_Size() int { + return m.Size() +} +func (m *DomainsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DomainsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DomainsResponse proto.InternalMessageInfo + +func (m *DomainsResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *DomainsResponse) GetDomains() []string { + if m != nil { + return m.Domains + } + return nil +} + +type UpsertDomainResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *UpsertDomainResponse) Reset() { *m = UpsertDomainResponse{} } +func (*UpsertDomainResponse) ProtoMessage() {} +func (*UpsertDomainResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_73e6234e76dbdb84, []int{1} +} +func (m *UpsertDomainResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpsertDomainResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpsertDomainResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpsertDomainResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpsertDomainResponse.Merge(m, src) +} +func (m *UpsertDomainResponse) XXX_Size() int { + return m.Size() +} +func (m *UpsertDomainResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpsertDomainResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpsertDomainResponse proto.InternalMessageInfo + +func (m *UpsertDomainResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type UpsertDomainRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + Ttl uint32 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl"` +} + +func (m *UpsertDomainRequest) Reset() { *m = UpsertDomainRequest{} } +func (*UpsertDomainRequest) ProtoMessage() {} +func (*UpsertDomainRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_73e6234e76dbdb84, []int{2} +} +func (m *UpsertDomainRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpsertDomainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpsertDomainRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*UpsertDomainRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpsertDomainRequest.Merge(m, src) +} +func (m *UpsertDomainRequest) XXX_Size() int { + return m.Size() +} +func (m *UpsertDomainRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpsertDomainRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpsertDomainRequest proto.InternalMessageInfo + +func (m *UpsertDomainRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *UpsertDomainRequest) GetTtl() uint32 { + if m != nil { + return m.Ttl + } + return 0 +} + +func init() { + proto.RegisterType((*DomainsResponse)(nil), "models.DomainsResponse") + proto.RegisterType((*UpsertDomainResponse)(nil), "models.UpsertDomainResponse") + proto.RegisterType((*UpsertDomainRequest)(nil), "models.UpsertDomainRequest") +} + +func init() { proto.RegisterFile("domain.proto", fileDescriptor_73e6234e76dbdb84) } + +var fileDescriptor_73e6234e76dbdb84 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xc9, 0xcf, 0x4d, + 0xcc, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, + 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, + 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0x26, 0xc5, + 0x9d, 0x5a, 0x54, 0x94, 0x5f, 0x04, 0xe1, 0x28, 0x05, 0x70, 0xf1, 0xbb, 0x80, 0xcd, 0x2c, 0x0e, + 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0x52, 0xe6, 0x62, 0x05, 0xab, 0x90, 0x60, 0x54, + 0x60, 0xd4, 0xe0, 0x36, 0xe2, 0xd5, 0x83, 0x58, 0xa3, 0xe7, 0x0a, 0x12, 0x0c, 0x82, 0xc8, 0x09, + 0x49, 0x70, 0xb1, 0x43, 0xdc, 0x52, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, 0x19, 0x04, 0xe3, 0x2a, + 0x59, 0x73, 0x89, 0x84, 0x16, 0x14, 0xa7, 0x16, 0x95, 0x40, 0xcc, 0x25, 0xc9, 0x58, 0xa5, 0x10, + 0x2e, 0x61, 0x54, 0xcd, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x4a, 0x5c, 0x6c, 0x10, 0xe3, 0xc1, + 0x9a, 0x39, 0x9d, 0xb8, 0x5e, 0xdd, 0x93, 0x87, 0x8a, 0x04, 0x41, 0x69, 0x21, 0x49, 0x2e, 0xe6, + 0x92, 0x92, 0x1c, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x5e, 0x27, 0xf6, 0x57, 0xf7, 0xe4, 0x41, 0xdc, + 0x20, 0x10, 0xe1, 0x64, 0x72, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, + 0x31, 0x36, 0x3c, 0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xe1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, + 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x09, + 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89, 0x0d, 0x1c, 0x42, + 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2a, 0xd1, 0x20, 0xc4, 0x75, 0x01, 0x00, 0x00, +} + +func (this *DomainsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DomainsResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "Domains: "+fmt.Sprintf("%#v", this.Domains)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UpsertDomainResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.UpsertDomainResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UpsertDomainRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.UpsertDomainRequest{") + s = append(s, "Domain: 
"+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "Ttl: "+fmt.Sprintf("%#v", this.Ttl)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDomain(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DomainsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DomainsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DomainsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Domains) > 0 { + for iNdEx := len(m.Domains) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Domains[iNdEx]) + copy(dAtA[i:], m.Domains[iNdEx]) + i = encodeVarintDomain(dAtA, i, uint64(len(m.Domains[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDomain(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpsertDomainResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpsertDomainResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpsertDomainResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDomain(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpsertDomainRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpsertDomainRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpsertDomainRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ttl != 0 { + i = encodeVarintDomain(dAtA, i, uint64(m.Ttl)) + i-- + dAtA[i] = 0x10 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintDomain(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDomain(dAtA []byte, offset int, v uint64) int { + offset -= sovDomain(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DomainsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovDomain(uint64(l)) + } + if len(m.Domains) > 0 { + for _, s := range m.Domains { + l = len(s) + n += 1 + l + sovDomain(uint64(l)) + } + } + return n +} + +func (m *UpsertDomainResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = 
m.Error.Size() + n += 1 + l + sovDomain(uint64(l)) + } + return n +} + +func (m *UpsertDomainRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovDomain(uint64(l)) + } + if m.Ttl != 0 { + n += 1 + sovDomain(uint64(m.Ttl)) + } + return n +} + +func sovDomain(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDomain(x uint64) (n int) { + return sovDomain(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DomainsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DomainsResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Domains:` + fmt.Sprintf("%v", this.Domains) + `,`, + `}`, + }, "") + return s +} +func (this *UpsertDomainResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpsertDomainResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpsertDomainRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpsertDomainRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `Ttl:` + fmt.Sprintf("%v", this.Ttl) + `,`, + `}`, + }, "") + return s +} +func valueToStringDomain(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DomainsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DomainsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DomainsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Domains = append(m.Domains, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDomain(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDomain + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpsertDomainResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpsertDomainResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpsertDomainResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDomain(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDomain + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpsertDomainRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpsertDomainRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpsertDomainRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDomain + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDomain + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType) + } + m.Ttl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDomain + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ttl |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDomain(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDomain + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDomain(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDomain + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDomain + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDomain + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDomain + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDomain + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDomain + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDomain = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDomain = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDomain = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/domain.proto b/vendor/code.cloudfoundry.org/bbs/models/domain.proto new file mode 100644 index 00000000..7028f0f7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/domain.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "error.proto"; + +option (gogoproto.equal_all) = false; + +message DomainsResponse { + Error error = 1; + repeated string domains = 2; +} + +message UpsertDomainResponse { + Error error = 1; +} + +message UpsertDomainRequest { + string domain = 1 [(gogoproto.jsontag) = "domain"]; + uint32 ttl = 2 [(gogoproto.jsontag) = "ttl"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/domains.go b/vendor/code.cloudfoundry.org/bbs/models/domains.go new file mode 100644 index 00000000..a94ee90c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/domains.go @@ -0,0 +1,36 @@ +package models + +type DomainSet map[string]struct{} + +func (set DomainSet) Add(domain string) { + set[domain] = struct{}{} +} + +func (set DomainSet) Each(predicate func(domain string)) { + for domain := range set { + predicate(domain) + } +} + +func (set DomainSet) Contains(domain string) bool { + _, found 
:= set[domain] + return found +} + +func NewDomainSet(domains []string) DomainSet { + domainSet := DomainSet{} + for _, domain := range domains { + domainSet.Add(domain) + } + return domainSet +} + +func (request *UpsertDomainRequest) Validate() error { + var validationError ValidationError + + if request.Domain == "" { + return validationError.Append(ErrInvalidField{"domain"}) + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/environment_variables.go b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.go new file mode 100644 index 00000000..49185848 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.go @@ -0,0 +1,10 @@ +package models + +import "errors" + +func (envVar EnvironmentVariable) Validate() error { + if envVar.Name == "" { + return errors.New("invalid field: name cannot be blank") + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go new file mode 100644 index 00000000..c3db470a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.pb.go @@ -0,0 +1,436 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: environment_variables.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EnvironmentVariable struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value"` +} + +func (m *EnvironmentVariable) Reset() { *m = EnvironmentVariable{} } +func (*EnvironmentVariable) ProtoMessage() {} +func (*EnvironmentVariable) Descriptor() ([]byte, []int) { + return fileDescriptor_8938dda491bd78a1, []int{0} +} +func (m *EnvironmentVariable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvironmentVariable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvironmentVariable.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvironmentVariable) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvironmentVariable.Merge(m, src) +} +func (m *EnvironmentVariable) XXX_Size() int { + return m.Size() +} +func (m *EnvironmentVariable) XXX_DiscardUnknown() { + xxx_messageInfo_EnvironmentVariable.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvironmentVariable proto.InternalMessageInfo + +func (m *EnvironmentVariable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnvironmentVariable) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func init() { + proto.RegisterType((*EnvironmentVariable)(nil), "models.EnvironmentVariable") +} + +func init() { proto.RegisterFile("environment_variables.proto", fileDescriptor_8938dda491bd78a1) } + +var fileDescriptor_8938dda491bd78a1 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcd, 0x2b, 0xcb, + 0x2c, 0xca, 0xcf, 0xcb, 0x4d, 0xcd, 0x2b, 0x89, 0x2f, 0x4b, 0x2c, 0xca, 0x4c, 0x4c, 0xca, 0x49, + 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, + 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, + 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x14, 0xc2, + 0x25, 0xec, 0x8a, 0x30, 0x35, 0x0c, 0x6a, 0xa8, 0x90, 0x0c, 0x17, 0x4b, 0x5e, 0x62, 0x6e, 0xaa, + 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xc7, 0xab, 0x7b, 0xf2, 0x60, 0x7e, 0x10, 0x98, 0x14, + 0x92, 0xe7, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x95, 0x60, 0x02, 0x4b, 0x73, 0xbe, 0xba, 0x27, + 0x0f, 0x11, 0x08, 0x82, 0x50, 0x4e, 0x26, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, + 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, + 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, + 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, + 0xc0, 0x4e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x8a, 0x91, 0xe1, 0xe8, 0x00, 0x00, + 0x00, +} + +func (this *EnvironmentVariable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnvironmentVariable) + if !ok { + that2, ok := that.(EnvironmentVariable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this 
*EnvironmentVariable) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EnvironmentVariable{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEnvironmentVariables(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *EnvironmentVariable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvironmentVariable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvironmentVariable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintEnvironmentVariables(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvironmentVariables(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEnvironmentVariables(dAtA []byte, offset int, v uint64) int { + offset -= sovEnvironmentVariables(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EnvironmentVariable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvironmentVariables(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovEnvironmentVariables(uint64(l)) + } + return n +} + +func sovEnvironmentVariables(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEnvironmentVariables(x uint64) (n int) { + return sovEnvironmentVariables(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *EnvironmentVariable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvironmentVariable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringEnvironmentVariables(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *EnvironmentVariable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvironmentVariable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvironmentVariable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvironmentVariables + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvironmentVariables + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvironmentVariables + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvironmentVariables + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvironmentVariables(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEnvironmentVariables + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEnvironmentVariables(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvironmentVariables + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEnvironmentVariables + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEnvironmentVariables + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEnvironmentVariables + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEnvironmentVariables = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEnvironmentVariables = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEnvironmentVariables = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto 
new file mode 100644 index 00000000..390aa783 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/environment_variables.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message EnvironmentVariable { + string name = 1 [(gogoproto.jsontag) = "name"]; + string value = 2 [(gogoproto.jsontag) = "value"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/error.pb.go b/vendor/code.cloudfoundry.org/bbs/models/error.pb.go new file mode 100644 index 00000000..039ca6d6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/error.pb.go @@ -0,0 +1,515 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: error.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Error_Type int32 + +const ( + Error_UnknownError Error_Type = 0 + Error_InvalidRecord Error_Type = 3 + Error_InvalidRequest Error_Type = 4 + Error_InvalidResponse Error_Type = 5 + Error_InvalidProtobufMessage Error_Type = 6 + Error_InvalidJSON Error_Type = 7 + Error_FailedToOpenEnvelope Error_Type = 8 + Error_InvalidStateTransition Error_Type = 9 + Error_ResourceConflict Error_Type = 11 + Error_ResourceExists Error_Type = 12 + Error_ResourceNotFound Error_Type = 13 + Error_RouterError Error_Type = 14 + Error_ActualLRPCannotBeClaimed Error_Type = 15 + Error_ActualLRPCannotBeStarted Error_Type = 16 + Error_ActualLRPCannotBeCrashed Error_Type = 17 + Error_ActualLRPCannotBeFailed Error_Type = 18 + Error_ActualLRPCannotBeRemoved Error_Type = 19 + Error_ActualLRPCannotBeUnclaimed Error_Type = 21 + Error_RunningOnDifferentCell Error_Type = 24 + Error_GUIDGeneration Error_Type = 26 + Error_Deserialize Error_Type = 27 + Error_Deadlock Error_Type = 28 + Error_Unrecoverable Error_Type = 29 + Error_LockCollision Error_Type = 30 + Error_Timeout Error_Type = 31 +) + +var Error_Type_name = map[int32]string{ + 0: "UnknownError", + 3: "InvalidRecord", + 4: "InvalidRequest", + 5: "InvalidResponse", + 6: "InvalidProtobufMessage", + 7: "InvalidJSON", + 8: "FailedToOpenEnvelope", + 9: "InvalidStateTransition", + 11: "ResourceConflict", + 12: "ResourceExists", + 13: "ResourceNotFound", + 14: "RouterError", + 15: "ActualLRPCannotBeClaimed", + 16: "ActualLRPCannotBeStarted", + 17: "ActualLRPCannotBeCrashed", + 18: "ActualLRPCannotBeFailed", + 19: "ActualLRPCannotBeRemoved", + 21: "ActualLRPCannotBeUnclaimed", + 24: "RunningOnDifferentCell", + 26: "GUIDGeneration", + 27: "Deserialize", + 28: "Deadlock", + 29: "Unrecoverable", + 30: "LockCollision", + 31: "Timeout", +} + +var Error_Type_value = map[string]int32{ + "UnknownError": 0, + "InvalidRecord": 3, + "InvalidRequest": 4, + "InvalidResponse": 5, + "InvalidProtobufMessage": 6, + "InvalidJSON": 7, + "FailedToOpenEnvelope": 8, + "InvalidStateTransition": 9, + "ResourceConflict": 11, + "ResourceExists": 12, + "ResourceNotFound": 13, + "RouterError": 14, + 
"ActualLRPCannotBeClaimed": 15, + "ActualLRPCannotBeStarted": 16, + "ActualLRPCannotBeCrashed": 17, + "ActualLRPCannotBeFailed": 18, + "ActualLRPCannotBeRemoved": 19, + "ActualLRPCannotBeUnclaimed": 21, + "RunningOnDifferentCell": 24, + "GUIDGeneration": 26, + "Deserialize": 27, + "Deadlock": 28, + "Unrecoverable": 29, + "LockCollision": 30, + "Timeout": 31, +} + +func (Error_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0579b252106fcf4a, []int{0, 0} +} + +type Error struct { + Type Error_Type `protobuf:"varint,1,opt,name=type,proto3,enum=models.Error_Type" json:"type"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message"` +} + +func (m *Error) Reset() { *m = Error{} } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_0579b252106fcf4a, []int{0} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(m, src) +} +func (m *Error) XXX_Size() int { + return m.Size() +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetType() Error_Type { + if m != nil { + return m.Type + } + return Error_UnknownError +} + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterEnum("models.Error_Type", Error_Type_name, Error_Type_value) + proto.RegisterType((*Error)(nil), "models.Error") +} + +func init() { proto.RegisterFile("error.proto", fileDescriptor_0579b252106fcf4a) } + +var fileDescriptor_0579b252106fcf4a = []byte{ + // 585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0xb5, 0xf9, 0x0c, 0x98, 0x09, 0x3f, 0x97, 0x21, 0x1f, 0x84, 0x40, 0x07, 0x64, 0xa9, 0x12, + 0x9b, 0x86, 0xaa, 0xed, 0x0b, 0x34, 0x09, 0x20, 0x2a, 0x0a, 0xc8, 0x24, 0x0f, 0x30, 0xb1, 0x6f, + 0xc2, 0x88, 0xc9, 0x4c, 0x3a, 0x1e, 0xa7, 0xa5, 0xab, 0x3e, 0x42, 0x1f, 0xa3, 0x8f, 0xd2, 0x45, + 0x17, 0x2c, 0x59, 0xa1, 0x62, 0x36, 0x2d, 0x2b, 0x1e, 0xa1, 0xb2, 0x13, 0x10, 0x12, 0x6c, 0xac, + 0x7b, 0xcf, 0xb9, 0xe7, 0xf8, 0x9e, 0x6b, 0x99, 0x94, 0xd0, 0x18, 0x6d, 0x6a, 0x03, 0xa3, 0xad, + 0xa6, 0x53, 0x7d, 0x1d, 0xa3, 0x4c, 0xaa, 0xaf, 0x7a, 0xc2, 0x9e, 0xa6, 0x9d, 0x5a, 0xa4, 0xfb, + 0xdb, 0x3d, 0xdd, 0xd3, 0xdb, 0x05, 0xdd, 0x49, 0xbb, 0x45, 0x57, 0x34, 0x45, 0x35, 0x92, 0x05, + 0xbf, 0x26, 0xc9, 0xe4, 0x4e, 0x6e, 0x43, 0x5f, 0x13, 0xcf, 0x9e, 0x0f, 0xb0, 0xe2, 0x6e, 0xba, + 0x5b, 0xf3, 0x6f, 0x68, 0x6d, 0xe4, 0x57, 0x2b, 0xc8, 0x5a, 0xeb, 0x7c, 0x80, 0x75, 0xff, 0xf6, + 0x6a, 0xa3, 0x98, 0x09, 0x8b, 0x27, 0x7d, 0x49, 0xa6, 0xfb, 0x98, 0x24, 0xbc, 0x87, 0x95, 0x89, + 0x4d, 0x77, 0x6b, 0xa6, 0x5e, 0xba, 0xbd, 0xda, 0xb8, 0x87, 0xc2, 0xfb, 0x22, 0xf8, 0xeb, 0x11, + 0x2f, 0xd7, 0x53, 0x20, 0xb3, 0x6d, 0x75, 0xa6, 0xf4, 0x67, 0x55, 0x98, 0x82, 0x43, 0x17, 0xc9, + 0xdc, 0xbe, 0x1a, 0x72, 0x29, 0xe2, 0x10, 0x23, 0x6d, 0x62, 0xf8, 0x8f, 0x52, 0x32, 0xff, 0x00, + 0x7d, 0x4a, 0x31, 0xb1, 0xe0, 0xd1, 0x25, 0xb2, 0xf0, 0x80, 0x25, 0x03, 0xad, 0x12, 0x84, 0x49, + 0x5a, 0x25, 0xcb, 0x63, 0xf0, 0x78, 0x9c, 
0xf0, 0xe3, 0xe8, 0x85, 0x30, 0x45, 0x17, 0x48, 0x69, + 0xcc, 0x7d, 0x38, 0x39, 0x3a, 0x84, 0x69, 0x5a, 0x21, 0xe5, 0x5d, 0x2e, 0x24, 0xc6, 0x2d, 0x7d, + 0x34, 0x40, 0xb5, 0xa3, 0x86, 0x28, 0xf5, 0x00, 0xc1, 0x7f, 0x64, 0x73, 0x62, 0xb9, 0xc5, 0x96, + 0xe1, 0x2a, 0x11, 0x56, 0x68, 0x05, 0x33, 0xb4, 0x4c, 0x20, 0xc4, 0x44, 0xa7, 0x26, 0xc2, 0x86, + 0x56, 0x5d, 0x29, 0x22, 0x0b, 0xa5, 0x7c, 0xc3, 0x7b, 0x74, 0xe7, 0x8b, 0x48, 0x6c, 0x02, 0xb3, + 0x8f, 0x27, 0x0f, 0xb5, 0xdd, 0xd5, 0xa9, 0x8a, 0x61, 0x2e, 0x5f, 0x23, 0xd4, 0xa9, 0x45, 0x33, + 0xca, 0x3b, 0x4f, 0xd7, 0x49, 0xe5, 0x7d, 0x64, 0x53, 0x2e, 0x0f, 0xc2, 0xe3, 0x06, 0x57, 0x4a, + 0xdb, 0x3a, 0x36, 0x24, 0x17, 0x7d, 0x8c, 0x61, 0xe1, 0x59, 0xf6, 0xc4, 0x72, 0x63, 0x31, 0x06, + 0x78, 0x5e, 0x6b, 0x78, 0x72, 0x8a, 0x31, 0x2c, 0xd2, 0x35, 0xb2, 0xf2, 0x84, 0x1d, 0x25, 0x06, + 0xfa, 0xac, 0x34, 0xc4, 0xbe, 0x1e, 0x62, 0x0c, 0x4b, 0x94, 0x91, 0xea, 0x13, 0xb6, 0xad, 0xa2, + 0xf1, 0x5a, 0xff, 0xe7, 0x17, 0x0a, 0x53, 0xa5, 0x84, 0xea, 0x1d, 0xa9, 0xa6, 0xe8, 0x76, 0xd1, + 0xa0, 0xb2, 0x0d, 0x94, 0x12, 0x2a, 0xf9, 0x2d, 0xf6, 0xda, 0xfb, 0xcd, 0x3d, 0x54, 0x68, 0x78, + 0x71, 0xb5, 0x6a, 0x9e, 0xba, 0x89, 0x09, 0x1a, 0xc1, 0xa5, 0xf8, 0x8a, 0xb0, 0x46, 0x67, 0x89, + 0xdf, 0x44, 0x1e, 0x4b, 0x1d, 0x9d, 0xc1, 0x7a, 0xfe, 0xcd, 0xdb, 0xca, 0x60, 0xa4, 0x87, 0x68, + 0x78, 0x47, 0x22, 0xbc, 0xc8, 0xa1, 0x03, 0x1d, 0x9d, 0x35, 0xb4, 0x94, 0x22, 0xc9, 0x4d, 0x18, + 0x2d, 0x91, 0xe9, 0x96, 0xe8, 0xa3, 0x4e, 0x2d, 0x6c, 0x04, 0x9e, 0xef, 0x82, 0x1b, 0x78, 0xfe, + 0x04, 0x4c, 0x04, 0x9e, 0x4f, 0x80, 0x04, 0x9e, 0x5f, 0x86, 0x72, 0xe0, 0xf9, 0xcb, 0xb0, 0x1c, + 0x78, 0xfe, 0x0a, 0xac, 0x04, 0x9e, 0xbf, 0x0a, 0xab, 0xf5, 0x77, 0x17, 0xd7, 0xcc, 0xbd, 0xbc, + 0x66, 0xce, 0xdd, 0x35, 0x73, 0xbf, 0x65, 0xcc, 0xfd, 0x91, 0x31, 0xe7, 0x67, 0xc6, 0xdc, 0x8b, + 0x8c, 0xb9, 0xbf, 0x33, 0xe6, 0xfe, 0xc9, 0x98, 0x73, 0x97, 0x31, 0xf7, 0xfb, 0x0d, 0x73, 0x2e, + 0x6e, 0x98, 0x73, 0x79, 0xc3, 0x9c, 0xce, 0x54, 0xf1, 0x2f, 0xbc, 0xfd, 0x17, 0x00, 0x00, 0xff, + 0xff, 0xf2, 0xb4, 0xa0, 0xf2, 0x51, 0x03, 0x00, 0x00, +} + +func (x Error_Type) String() string { + s, ok := Error_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Error) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.Error{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringError(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Error) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Error) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintError(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintError(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintError(dAtA []byte, 
offset int, v uint64) int { + offset -= sovError(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Error) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovError(uint64(m.Type)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovError(uint64(l)) + } + return n +} + +func sovError(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozError(x uint64) (n int) { + return sovError(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Error) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Error{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringError(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Error) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Error: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Error: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Error_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowError + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthError + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthError + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipError(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthError + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipError(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowError + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthError + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupError + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthError + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthError = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowError = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupError = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/error.proto b/vendor/code.cloudfoundry.org/bbs/models/error.proto new file mode 100644 index 00000000..7aa1a9c8 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/error.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = false; +option (gogoproto.goproto_enum_prefix_all) = true; + +message Error { + enum Type { + reserved 1, 2, 10, 20, 22, 23, 25; // previously used and removed values + + UnknownError = 0; + + InvalidRecord = 3; + InvalidRequest = 4; + InvalidResponse = 5; + InvalidProtobufMessage = 6; + InvalidJSON = 7; + FailedToOpenEnvelope = 8; + InvalidStateTransition = 9; + + ResourceConflict = 11; + ResourceExists = 12; + ResourceNotFound = 13; + RouterError = 14; + + ActualLRPCannotBeClaimed = 15; + ActualLRPCannotBeStarted = 16; + ActualLRPCannotBeCrashed = 17; + ActualLRPCannotBeFailed = 18; + ActualLRPCannotBeRemoved = 19; + ActualLRPCannotBeUnclaimed = 21; + + RunningOnDifferentCell = 24; + + GUIDGeneration = 26; + + Deserialize = 27; + + Deadlock = 28; + Unrecoverable = 29; + + LockCollision = 30; + + Timeout = 31; + } + + Type type = 1 [(gogoproto.jsontag) = "type"]; + string message = 2 [(gogoproto.jsontag) = "message"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/errors.go b/vendor/code.cloudfoundry.org/bbs/models/errors.go new file mode 100644 index 00000000..fec8e3f7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/errors.go @@ -0,0 +1,186 @@ +package models + +import ( + "encoding/json" + "errors" + "fmt" +) + +func NewError(errType Error_Type, msg string) *Error { + return &Error{ + Type: errType, + Message: msg, + } +} + +func ConvertError(err error) *Error { + if err == nil { + return nil + } + + modelErr, ok := err.(*Error) + if !ok { + modelErr = NewError(Error_UnknownError, err.Error()) + } + return modelErr +} + +func (err *Error) ToError() error { + if err == nil { + return nil + } + return err +} + +func (err *Error) Equal(other error) bool { + if e, ok := other.(*Error); ok { + if err == nil && e != nil { + return false + } + return e.GetType() == err.GetType() + } + return false +} + +func (err *Error) Error() string { + return err.GetMessage() +} + +func (d *Error_Type) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + 
return err + } + + if v, found := Error_Type_value[name]; found { + *d = Error_Type(v) + return nil + } + return fmt.Errorf("invalid presence: %s", name) +} + +func (d Error_Type) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +var ( + ErrResourceNotFound = &Error{ + Type: Error_ResourceNotFound, + Message: "the requested resource could not be found", + } + + ErrResourceExists = &Error{ + Type: Error_ResourceExists, + Message: "the requested resource already exists", + } + + ErrResourceConflict = &Error{ + Type: Error_ResourceConflict, + Message: "the requested resource is in a conflicting state", + } + + ErrDeadlock = &Error{ + Type: Error_Deadlock, + Message: "the request failed due to deadlock", + } + + ErrBadRequest = &Error{ + Type: Error_InvalidRequest, + Message: "the request received is invalid", + } + + ErrUnknownError = &Error{ + Type: Error_UnknownError, + Message: "the request failed for an unknown reason", + } + + ErrDeserialize = &Error{ + Type: Error_Deserialize, + Message: "could not deserialize record", + } + + ErrFailedToOpenEnvelope = &Error{ + Type: Error_FailedToOpenEnvelope, + Message: "could not open envelope", + } + + ErrActualLRPCannotBeClaimed = &Error{ + Type: Error_ActualLRPCannotBeClaimed, + Message: "cannot claim actual LRP", + } + + ErrActualLRPCannotBeStarted = &Error{ + Type: Error_ActualLRPCannotBeStarted, + Message: "cannot start actual LRP", + } + + ErrActualLRPCannotBeCrashed = &Error{ + Type: Error_ActualLRPCannotBeCrashed, + Message: "cannot crash actual LRP", + } + + ErrActualLRPCannotBeFailed = &Error{ + Type: Error_ActualLRPCannotBeFailed, + Message: "cannot fail actual LRP", + } + + ErrActualLRPCannotBeRemoved = &Error{ + Type: Error_ActualLRPCannotBeRemoved, + Message: "cannot remove actual LRP", + } + + ErrActualLRPCannotBeUnclaimed = &Error{ + Type: Error_ActualLRPCannotBeUnclaimed, + Message: "cannot unclaim actual LRP", + } + + ErrGUIDGeneration = &Error{ + Type: Error_GUIDGeneration, + Message: "cannot generate random guid", + } + + ErrLockCollision = &Error{ + Type: Error_LockCollision, + Message: "lock already exists", + } +) + +type ErrInvalidField struct { + Field string +} + +func (err ErrInvalidField) Error() string { + return "Invalid field: " + err.Field +} + +type ErrInvalidModification struct { + InvalidField string +} + +func (err ErrInvalidModification) Error() string { + return "attempt to make invalid change to field: " + err.InvalidField +} + +// Deprecated: use the ActualLRPInstance API instead +var ErrActualLRPGroupInvalid = errors.New("ActualLRPGroup invalid") + +func NewTaskTransitionError(from, to Task_State) *Error { + return &Error{ + Type: Error_InvalidStateTransition, + Message: fmt.Sprintf("Cannot transition from %s to %s", from.String(), to.String()), + } +} + +func NewRunningOnDifferentCellError(expectedCellId, actualCellId string) *Error { + return &Error{ + Type: Error_RunningOnDifferentCell, + Message: fmt.Sprintf("Running on cell %s not %s", actualCellId, expectedCellId), + } +} + +func NewUnrecoverableError(err error) *Error { + return &Error{ + Type: Error_Unrecoverable, + Message: fmt.Sprint("Unrecoverable Error: ", err), + } +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/evacuation.go b/vendor/code.cloudfoundry.org/bbs/models/evacuation.go new file mode 100644 index 00000000..82aa828e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/evacuation.go @@ -0,0 +1,12 @@ +package models + +func (request *EvacuateRunningActualLRPRequest) SetRoutable(routable bool) { + 
request.OptionalRoutable = &EvacuateRunningActualLRPRequest_Routable{ + Routable: routable, + } +} + +func (request *EvacuateRunningActualLRPRequest) RoutableExists() bool { + _, ok := request.GetOptionalRoutable().(*EvacuateRunningActualLRPRequest_Routable) + return ok +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go b/vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go new file mode 100644 index 00000000..b759f031 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/evacuation.pb.go @@ -0,0 +1,2503 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: evacuation.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EvacuationResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + KeepContainer bool `protobuf:"varint,2,opt,name=keep_container,json=keepContainer,proto3" json:"keep_container"` +} + +func (m *EvacuationResponse) Reset() { *m = EvacuationResponse{} } +func (*EvacuationResponse) ProtoMessage() {} +func (*EvacuationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{0} +} +func (m *EvacuationResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuationResponse.Merge(m, src) +} +func (m *EvacuationResponse) XXX_Size() int { + return m.Size() +} +func (m *EvacuationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuationResponse proto.InternalMessageInfo + +func (m *EvacuationResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *EvacuationResponse) GetKeepContainer() bool { + if m != nil { + return m.KeepContainer + } + return false +} + +type EvacuateClaimedActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *EvacuateClaimedActualLRPRequest) Reset() { *m = EvacuateClaimedActualLRPRequest{} } +func (*EvacuateClaimedActualLRPRequest) ProtoMessage() {} +func (*EvacuateClaimedActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{1} +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateClaimedActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateClaimedActualLRPRequest.Merge(m, src) +} +func (m *EvacuateClaimedActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateClaimedActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateClaimedActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateClaimedActualLRPRequest proto.InternalMessageInfo + +func (m *EvacuateClaimedActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateClaimedActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type EvacuateRunningActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ActualLrpNetInfo *ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3" json:"actual_lrp_net_info,omitempty"` + ActualLrpInternalRoutes []*ActualLRPInternalRoute `protobuf:"bytes,5,rep,name=actual_lrp_internal_routes,json=actualLrpInternalRoutes,proto3" json:"actual_lrp_internal_routes,omitempty"` + MetricTags map[string]string `protobuf:"bytes,6,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are valid to be assigned to OptionalRoutable: + // *EvacuateRunningActualLRPRequest_Routable + OptionalRoutable isEvacuateRunningActualLRPRequest_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,8,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *EvacuateRunningActualLRPRequest) Reset() { *m = EvacuateRunningActualLRPRequest{} } +func (*EvacuateRunningActualLRPRequest) ProtoMessage() {} +func (*EvacuateRunningActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{2} +} +func (m *EvacuateRunningActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateRunningActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateRunningActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateRunningActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateRunningActualLRPRequest.Merge(m, src) +} +func (m *EvacuateRunningActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateRunningActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateRunningActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateRunningActualLRPRequest proto.InternalMessageInfo + +type 
isEvacuateRunningActualLRPRequest_OptionalRoutable interface { + isEvacuateRunningActualLRPRequest_OptionalRoutable() + MarshalTo([]byte) (int, error) + Size() int +} + +type EvacuateRunningActualLRPRequest_Routable struct { + Routable bool `protobuf:"varint,7,opt,name=Routable,proto3,oneof" json:"Routable,omitempty"` +} + +func (*EvacuateRunningActualLRPRequest_Routable) isEvacuateRunningActualLRPRequest_OptionalRoutable() { +} + +func (m *EvacuateRunningActualLRPRequest) GetOptionalRoutable() isEvacuateRunningActualLRPRequest_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpNetInfo() *ActualLRPNetInfo { + if m != nil { + return m.ActualLrpNetInfo + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetActualLrpInternalRoutes() []*ActualLRPInternalRoute { + if m != nil { + return m.ActualLrpInternalRoutes + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetMetricTags() map[string]string { + if m != nil { + return m.MetricTags + } + return nil +} + +func (m *EvacuateRunningActualLRPRequest) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*EvacuateRunningActualLRPRequest_Routable); ok { + return x.Routable + } + return false +} + +func (m *EvacuateRunningActualLRPRequest) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*EvacuateRunningActualLRPRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*EvacuateRunningActualLRPRequest_Routable)(nil), + } +} + +type EvacuateStoppedActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *EvacuateStoppedActualLRPRequest) Reset() { *m = EvacuateStoppedActualLRPRequest{} } +func (*EvacuateStoppedActualLRPRequest) ProtoMessage() {} +func (*EvacuateStoppedActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{3} +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateStoppedActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateStoppedActualLRPRequest.Merge(m, src) +} +func (m *EvacuateStoppedActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateStoppedActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateStoppedActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvacuateStoppedActualLRPRequest proto.InternalMessageInfo + +func (m *EvacuateStoppedActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateStoppedActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type EvacuateCrashedActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message"` +} + +func (m *EvacuateCrashedActualLRPRequest) Reset() { *m = EvacuateCrashedActualLRPRequest{} } +func (*EvacuateCrashedActualLRPRequest) ProtoMessage() {} +func (*EvacuateCrashedActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{4} +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvacuateCrashedActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvacuateCrashedActualLRPRequest.Merge(m, src) +} +func (m *EvacuateCrashedActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *EvacuateCrashedActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvacuateCrashedActualLRPRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EvacuateCrashedActualLRPRequest proto.InternalMessageInfo + +func (m *EvacuateCrashedActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *EvacuateCrashedActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +func (m *EvacuateCrashedActualLRPRequest) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +type RemoveEvacuatingActualLRPRequest struct { + ActualLrpKey *ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3" json:"actual_lrp_key,omitempty"` + ActualLrpInstanceKey *ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3" json:"actual_lrp_instance_key,omitempty"` +} + +func (m *RemoveEvacuatingActualLRPRequest) Reset() { *m = RemoveEvacuatingActualLRPRequest{} } +func (*RemoveEvacuatingActualLRPRequest) ProtoMessage() {} +func (*RemoveEvacuatingActualLRPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{5} +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveEvacuatingActualLRPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveEvacuatingActualLRPRequest.Merge(m, src) +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveEvacuatingActualLRPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveEvacuatingActualLRPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveEvacuatingActualLRPRequest proto.InternalMessageInfo + +func (m *RemoveEvacuatingActualLRPRequest) GetActualLrpKey() *ActualLRPKey { + if m != nil { + return m.ActualLrpKey + } + return nil +} + +func (m *RemoveEvacuatingActualLRPRequest) GetActualLrpInstanceKey() *ActualLRPInstanceKey { + if m != nil { + return m.ActualLrpInstanceKey + } + return nil +} + +type RemoveEvacuatingActualLRPResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *RemoveEvacuatingActualLRPResponse) Reset() { *m = RemoveEvacuatingActualLRPResponse{} } +func (*RemoveEvacuatingActualLRPResponse) ProtoMessage() {} +func (*RemoveEvacuatingActualLRPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5cec7f656fd69c9d, []int{6} +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveEvacuatingActualLRPResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveEvacuatingActualLRPResponse.Merge(m, src) +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_Size() int { + return m.Size() +} +func (m *RemoveEvacuatingActualLRPResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_RemoveEvacuatingActualLRPResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveEvacuatingActualLRPResponse proto.InternalMessageInfo + +func (m *RemoveEvacuatingActualLRPResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func init() { + proto.RegisterType((*EvacuationResponse)(nil), "models.EvacuationResponse") + proto.RegisterType((*EvacuateClaimedActualLRPRequest)(nil), "models.EvacuateClaimedActualLRPRequest") + proto.RegisterType((*EvacuateRunningActualLRPRequest)(nil), "models.EvacuateRunningActualLRPRequest") + proto.RegisterMapType((map[string]string)(nil), "models.EvacuateRunningActualLRPRequest.MetricTagsEntry") + proto.RegisterType((*EvacuateStoppedActualLRPRequest)(nil), "models.EvacuateStoppedActualLRPRequest") + proto.RegisterType((*EvacuateCrashedActualLRPRequest)(nil), "models.EvacuateCrashedActualLRPRequest") + proto.RegisterType((*RemoveEvacuatingActualLRPRequest)(nil), "models.RemoveEvacuatingActualLRPRequest") + proto.RegisterType((*RemoveEvacuatingActualLRPResponse)(nil), "models.RemoveEvacuatingActualLRPResponse") +} + +func init() { proto.RegisterFile("evacuation.proto", fileDescriptor_5cec7f656fd69c9d) } + +var fileDescriptor_5cec7f656fd69c9d = []byte{ + // 628 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0xf5, 0x00, 0xe1, 0x4b, 0x26, 0xc0, 0x17, 0x06, 0x2a, 0xac, 0x08, 0x4d, 0xd2, 0x74, 0x93, + 0x4d, 0x83, 0x44, 0xab, 0xfe, 0x20, 0x75, 0xd1, 0x20, 0x54, 0x28, 0x50, 0x55, 0x43, 0x17, 0x55, + 0xbb, 0xb0, 0x26, 0xe1, 0x62, 0x2c, 0xec, 0x19, 0xd7, 0x1e, 0x47, 0x4a, 0x57, 0x7d, 0x84, 0x3e, + 0x46, 0xd7, 0xed, 0x4b, 0x74, 0xc9, 0x92, 0x15, 0x2a, 0x66, 0x53, 0x65, 0x51, 0xf1, 0x08, 0x95, + 0xc7, 0x89, 0x31, 0x50, 0x21, 0x75, 0xd7, 0xec, 0xe6, 0x9c, 0x7b, 0xef, 0xb9, 0x47, 0x33, 0x73, + 0x67, 0x70, 0x05, 0x7a, 0xbc, 0x1b, 0x71, 0xe5, 0x48, 0xd1, 0xf2, 0x03, 0xa9, 0x24, 0x99, 0xf6, + 0xe4, 0x3e, 0xb8, 0x61, 0xf5, 0xbe, 0xed, 0xa8, 0xc3, 0xa8, 0xd3, 0xea, 0x4a, 0x6f, 0xc5, 0x96, + 0xb6, 0x5c, 0xd1, 0xe1, 0x4e, 0x74, 0xa0, 0x91, 0x06, 0x7a, 0x95, 0x96, 0x55, 0x2b, 0xbc, 0xab, + 0x22, 0xee, 0x5a, 0x6e, 0xe0, 0x0f, 0x99, 0x32, 0x04, 0x81, 0x0c, 0x52, 0xd0, 0x50, 0x98, 0x6c, + 0x64, 0x9d, 0x18, 0x84, 0xbe, 0x14, 0x21, 0x90, 0x7b, 0xb8, 0xa0, 0x93, 0x4c, 0x54, 0x47, 0xcd, + 0xf2, 0xea, 0x6c, 0x2b, 0xed, 0xdd, 0xda, 0x48, 0x48, 0x96, 0xc6, 0xc8, 0x53, 0x3c, 0x77, 0x04, + 0xe0, 0x5b, 0x5d, 0x29, 0x14, 0x77, 0x04, 0x04, 0xe6, 0x44, 0x1d, 0x35, 0x8b, 0x6d, 0x32, 0x38, + 0xad, 0x5d, 0x8b, 0xb0, 0xd9, 0x04, 0xaf, 0x8f, 0x60, 0xe3, 0x2b, 0xc2, 0xb5, 0x61, 0x5b, 0x58, + 0x77, 0xb9, 0xe3, 0xc1, 0xfe, 0x73, 0x6d, 0x73, 0x87, 0xbd, 0x66, 0xf0, 0x21, 0x82, 0x50, 0x91, + 0x35, 0x3c, 0x77, 0x69, 0xdd, 0x3a, 0x82, 0xfe, 0xd0, 0xcc, 0xe2, 0xc8, 0x4c, 0x56, 0xb1, 0x0d, + 0x7d, 0x36, 0x93, 0xe6, 0xee, 0x04, 0xfe, 0x36, 0xf4, 0xc9, 0x1e, 0x5e, 0xca, 0xd5, 0x3a, 0x22, + 0x54, 0x5c, 0x74, 0x41, 0x8b, 0x4c, 0x68, 0x91, 0xe5, 0x1b, 0x22, 0x5b, 0xc3, 0xa4, 0x44, 0x6c, + 0x31, 0x13, 0xcb, 0xb1, 0x8d, 0x5f, 0x53, 0x97, 0xa6, 0x59, 0x24, 0x84, 0x23, 0xec, 0x7f, 0xde, + 0x34, 0x79, 0x81, 0x17, 0x72, 0xa2, 0x02, 0x94, 0xe5, 0x88, 0x03, 0x69, 0x4e, 0x6a, 0x41, 0xf3, + 0x86, 0xe0, 0x2b, 0x50, 0x5b, 0xe2, 0x40, 0xb2, 0x4a, 0x26, 0x36, 0x64, 0xc8, 0x7b, 0x5c, 0xbd, + 0xe2, 0x4e, 0x41, 0x20, 0xb8, 0x6b, 0x05, 0x32, 0x52, 0x10, 0x9a, 0x85, 0xfa, 0x64, 0xb3, 0xbc, + 0x4a, 0xff, 0x60, 0x30, 0xcd, 0x63, 0x49, 0x1a, 0x5b, 0xca, 0x59, 0xcc, 0xf1, 0x21, 0x79, 0x8b, + 
0xcb, 0x1e, 0xa8, 0xc0, 0xe9, 0x5a, 0x8a, 0xdb, 0xa1, 0x39, 0xad, 0xd5, 0x1e, 0x67, 0xb7, 0xee, + 0xf6, 0x4d, 0x6f, 0xed, 0xea, 0xd2, 0x37, 0xdc, 0x0e, 0x37, 0x84, 0x0a, 0xfa, 0x0c, 0x7b, 0x19, + 0x41, 0x96, 0x71, 0x31, 0xe9, 0xc1, 0x3b, 0x2e, 0x98, 0xff, 0x25, 0xd7, 0x73, 0xd3, 0x60, 0x19, + 0x43, 0xda, 0x78, 0x9e, 0xf7, 0xb8, 0xe3, 0xf2, 0x8e, 0xe3, 0x3a, 0xaa, 0x6f, 0x7d, 0x94, 0x02, + 0xcc, 0x62, 0x1d, 0x35, 0x4b, 0xed, 0x3b, 0x83, 0xd3, 0xda, 0xcd, 0x20, 0xab, 0xe4, 0xa9, 0x77, + 0x52, 0x40, 0xf5, 0x19, 0xfe, 0xff, 0x9a, 0x01, 0x52, 0xc1, 0x93, 0xa3, 0xa3, 0x2f, 0xb1, 0x64, + 0x49, 0x16, 0x71, 0xa1, 0xc7, 0xdd, 0x08, 0xf4, 0x49, 0x96, 0x58, 0x0a, 0xd6, 0x26, 0x9e, 0xa0, + 0xf6, 0x02, 0x9e, 0x97, 0x7e, 0x32, 0x7c, 0xc3, 0xcd, 0x4c, 0x7c, 0xbd, 0x9c, 0x2a, 0x4e, 0x55, + 0x0a, 0x57, 0xa6, 0x64, 0x4f, 0x49, 0xdf, 0x1f, 0x87, 0x29, 0x19, 0xe4, 0x47, 0x3b, 0xe0, 0xe1, + 0xe1, 0x18, 0x98, 0x26, 0x8f, 0xf0, 0xac, 0x7e, 0xd3, 0x2c, 0x0f, 0xc2, 0x90, 0xdb, 0xa0, 0xe7, + 0xa3, 0xd4, 0x9e, 0x1f, 0x9c, 0xd6, 0xae, 0x06, 0xd8, 0x8c, 0x86, 0xbb, 0x29, 0x6a, 0x7c, 0x43, + 0xb8, 0xce, 0xc0, 0x93, 0x3d, 0x18, 0x3d, 0xa2, 0x63, 0xf0, 0x26, 0x34, 0x36, 0xf1, 0xdd, 0x5b, + 0x4c, 0xff, 0xc5, 0x17, 0xd0, 0x7e, 0x78, 0x7c, 0x46, 0x8d, 0x93, 0x33, 0x6a, 0x5c, 0x9c, 0x51, + 0xf4, 0x29, 0xa6, 0xe8, 0x4b, 0x4c, 0x8d, 0xef, 0x31, 0x45, 0xc7, 0x31, 0x45, 0x3f, 0x62, 0x8a, + 0x7e, 0xc6, 0xd4, 0xb8, 0x88, 0x29, 0xfa, 0x7c, 0x4e, 0x8d, 0xe3, 0x73, 0x6a, 0x9c, 0x9c, 0x53, + 0xa3, 0x33, 0xad, 0xbf, 0x9e, 0x07, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x02, 0x9c, 0x2f, 0xec, + 0xe4, 0x06, 0x00, 0x00, +} + +func (this *EvacuationResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EvacuationResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "KeepContainer: "+fmt.Sprintf("%#v", this.KeepContainer)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateClaimedActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EvacuateClaimedActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateRunningActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.EvacuateRunningActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + if this.ActualLrpNetInfo != nil { + s = append(s, "ActualLrpNetInfo: "+fmt.Sprintf("%#v", this.ActualLrpNetInfo)+",\n") + } + if this.ActualLrpInternalRoutes != nil { + s = append(s, "ActualLrpInternalRoutes: "+fmt.Sprintf("%#v", this.ActualLrpInternalRoutes)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += 
fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateRunningActualLRPRequest_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.EvacuateRunningActualLRPRequest_Routable{` + + `Routable:` + fmt.Sprintf("%#v", this.Routable) + `}`}, ", ") + return s +} +func (this *EvacuateStoppedActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.EvacuateStoppedActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EvacuateCrashedActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.EvacuateCrashedActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveEvacuatingActualLRPRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.RemoveEvacuatingActualLRPRequest{") + if this.ActualLrpKey != nil { + s = append(s, "ActualLrpKey: "+fmt.Sprintf("%#v", this.ActualLrpKey)+",\n") + } + if this.ActualLrpInstanceKey != nil { + s = append(s, "ActualLrpInstanceKey: "+fmt.Sprintf("%#v", this.ActualLrpInstanceKey)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveEvacuatingActualLRPResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.RemoveEvacuatingActualLRPResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEvacuation(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *EvacuationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.KeepContainer { + i-- + if m.KeepContainer { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Error != nil { + { + size, err := 
m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateClaimedActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateClaimedActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateClaimedActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateRunningActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateRunningActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateRunningActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintEvacuation(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x42 + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintEvacuation(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintEvacuation(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintEvacuation(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ActualLrpInternalRoutes) > 0 { + for iNdEx := len(m.ActualLrpInternalRoutes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActualLrpInternalRoutes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.ActualLrpNetInfo != nil { + { + size, err := m.ActualLrpNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateRunningActualLRPRequest_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateRunningActualLRPRequest_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *EvacuateStoppedActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateStoppedActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateStoppedActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvacuateCrashedActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvacuateCrashedActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvacuateCrashedActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintEvacuation(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x1a + } + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveEvacuatingActualLRPRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveEvacuatingActualLRPRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveEvacuatingActualLRPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpInstanceKey != nil { + { + size, err := m.ActualLrpInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ActualLrpKey != nil { + { + size, err := 
m.ActualLrpKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveEvacuatingActualLRPResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveEvacuatingActualLRPResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveEvacuatingActualLRPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvacuation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEvacuation(dAtA []byte, offset int, v uint64) int { + offset -= sovEvacuation(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EvacuationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.KeepContainer { + n += 2 + } + return n +} + +func (m *EvacuateClaimedActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *EvacuateRunningActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpNetInfo != nil { + l = m.ActualLrpNetInfo.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if len(m.ActualLrpInternalRoutes) > 0 { + for _, e := range m.ActualLrpInternalRoutes { + l = e.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovEvacuation(uint64(len(k))) + 1 + len(v) + sovEvacuation(uint64(len(v))) + n += mapEntrySize + 1 + sovEvacuation(uint64(mapEntrySize)) + } + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *EvacuateRunningActualLRPRequest_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *EvacuateStoppedActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *EvacuateCrashedActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if 
m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *RemoveEvacuatingActualLRPRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpKey != nil { + l = m.ActualLrpKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + if m.ActualLrpInstanceKey != nil { + l = m.ActualLrpInstanceKey.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func (m *RemoveEvacuatingActualLRPResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovEvacuation(uint64(l)) + } + return n +} + +func sovEvacuation(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvacuation(x uint64) (n int) { + return sovEvacuation(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *EvacuationResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuationResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `KeepContainer:` + fmt.Sprintf("%v", this.KeepContainer) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateClaimedActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateClaimedActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateRunningActualLRPRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForActualLrpInternalRoutes := "[]*ActualLRPInternalRoute{" + for _, f := range this.ActualLrpInternalRoutes { + repeatedStringForActualLrpInternalRoutes += strings.Replace(fmt.Sprintf("%v", f), "ActualLRPInternalRoute", "ActualLRPInternalRoute", 1) + "," + } + repeatedStringForActualLrpInternalRoutes += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]string{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&EvacuateRunningActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ActualLrpNetInfo:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpNetInfo), "ActualLRPNetInfo", "ActualLRPNetInfo", 1) + `,`, + `ActualLrpInternalRoutes:` + repeatedStringForActualLrpInternalRoutes + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateRunningActualLRPRequest_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateRunningActualLRPRequest_Routable{`, + 
`Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateStoppedActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateStoppedActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EvacuateCrashedActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EvacuateCrashedActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveEvacuatingActualLRPRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveEvacuatingActualLRPRequest{`, + `ActualLrpKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpKey), "ActualLRPKey", "ActualLRPKey", 1) + `,`, + `ActualLrpInstanceKey:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveEvacuatingActualLRPResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveEvacuatingActualLRPResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringEvacuation(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *EvacuationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepContainer", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepContainer = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateClaimedActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateClaimedActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateClaimedActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateRunningActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateRunningActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateRunningActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpNetInfo == nil { + m.ActualLrpNetInfo = &ActualLRPNetInfo{} + } + if err := m.ActualLrpNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInternalRoutes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActualLrpInternalRoutes = append(m.ActualLrpInternalRoutes, &ActualLRPInternalRoute{}) + if err := 
m.ActualLrpInternalRoutes[len(m.ActualLrpInternalRoutes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEvacuation + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthEvacuation + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEvacuation + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthEvacuation + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = &EvacuateRunningActualLRPRequest_Routable{b} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvacuateStoppedActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateStoppedActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateStoppedActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*EvacuateCrashedActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvacuateCrashedActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvacuateCrashedActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveEvacuatingActualLRPRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpKey == nil { + m.ActualLrpKey = &ActualLRPKey{} + } + if err := m.ActualLrpKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpInstanceKey == nil { + m.ActualLrpInstanceKey = &ActualLRPInstanceKey{} + } + if err := m.ActualLrpInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveEvacuatingActualLRPResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveEvacuatingActualLRPResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvacuation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 
0 { + return ErrInvalidLengthEvacuation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvacuation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvacuation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvacuation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvacuation(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvacuation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvacuation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvacuation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvacuation + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvacuation + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvacuation + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvacuation = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvacuation = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvacuation = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/evacuation.proto b/vendor/code.cloudfoundry.org/bbs/models/evacuation.proto new file mode 100644 index 00000000..85f4883d --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/evacuation.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actual_lrp.proto"; +import "error.proto"; + +option (gogoproto.equal_all) = false; + +message EvacuationResponse { + Error error = 1; + bool keep_container = 2 [(gogoproto.jsontag) = "keep_container"]; +} + +message EvacuateClaimedActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; +} + +message EvacuateRunningActualLRPRequest { + reserved 4; // previously removed ttl value + + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + ActualLRPNetInfo actual_lrp_net_info = 3; + repeated ActualLRPInternalRoute actual_lrp_internal_routes = 5; + map<string, string> metric_tags = 6; + oneof optional_routable { + bool Routable = 7; + } + string availability_zone = 8 [(gogoproto.jsontag) = "availability_zone"]; +} + +message EvacuateStoppedActualLRPRequest { + ActualLRPKey
actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; +} + +message EvacuateCrashedActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; + string error_message = 3 [(gogoproto.jsontag) = "error_message"]; +} + +message RemoveEvacuatingActualLRPRequest { + ActualLRPKey actual_lrp_key = 1; + ActualLRPInstanceKey actual_lrp_instance_key = 2; +} + +message RemoveEvacuatingActualLRPResponse { + Error error = 1; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/events.go b/vendor/code.cloudfoundry.org/bbs/models/events.go new file mode 100644 index 00000000..d09fa1f0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/events.go @@ -0,0 +1,329 @@ +package models + +import ( + "code.cloudfoundry.org/bbs/format" + "github.com/gogo/protobuf/proto" +) + +type Event interface { + EventType() string + Key() string + proto.Message +} + +const ( + EventTypeInvalid = "" + + EventTypeDesiredLRPCreated = "desired_lrp_created" + EventTypeDesiredLRPChanged = "desired_lrp_changed" + EventTypeDesiredLRPRemoved = "desired_lrp_removed" + + // Deprecated: use the ActualLRPInstance versions of this instead + EventTypeActualLRPCreated = "actual_lrp_created" + // Deprecated: use the ActualLRPInstance versions of this instead + EventTypeActualLRPChanged = "actual_lrp_changed" + // Deprecated: use the ActualLRPInstance versions of this instead + EventTypeActualLRPRemoved = "actual_lrp_removed" + EventTypeActualLRPCrashed = "actual_lrp_crashed" + + EventTypeActualLRPInstanceCreated = "actual_lrp_instance_created" + EventTypeActualLRPInstanceChanged = "actual_lrp_instance_changed" + EventTypeActualLRPInstanceRemoved = "actual_lrp_instance_removed" + + EventTypeTaskCreated = "task_created" + EventTypeTaskChanged = "task_changed" + EventTypeTaskRemoved = "task_removed" +) + +// Downgrade the DesiredLRPEvent payload (i.e. DesiredLRP(s)) to the given +// target version +func VersionDesiredLRPsTo(event Event, target format.Version) Event { + switch event := event.(type) { + case *DesiredLRPCreatedEvent: + return NewDesiredLRPCreatedEvent(event.DesiredLrp.VersionDownTo(target), event.TraceId) + case *DesiredLRPRemovedEvent: + return NewDesiredLRPRemovedEvent(event.DesiredLrp.VersionDownTo(target), event.TraceId) + case *DesiredLRPChangedEvent: + return NewDesiredLRPChangedEvent( + event.Before.VersionDownTo(target), + event.After.VersionDownTo(target), + event.TraceId, + ) + default: + return event + } +} + +// Downgrade the TaskEvent payload (i.e. 
Task(s)) to the given target version +func VersionTaskDefinitionsTo(event Event, target format.Version) Event { + switch event := event.(type) { + case *TaskCreatedEvent: + return NewTaskCreatedEvent(event.Task.VersionDownTo(target)) + case *TaskRemovedEvent: + return NewTaskRemovedEvent(event.Task.VersionDownTo(target)) + case *TaskChangedEvent: + return NewTaskChangedEvent(event.Before.VersionDownTo(target), event.After.VersionDownTo(target)) + default: + return event + } +} + +func NewDesiredLRPCreatedEvent(desiredLRP *DesiredLRP, traceId string) *DesiredLRPCreatedEvent { + return &DesiredLRPCreatedEvent{ + DesiredLrp: desiredLRP, + TraceId: traceId, + } +} + +func (event *DesiredLRPCreatedEvent) EventType() string { + return EventTypeDesiredLRPCreated +} + +func (event *DesiredLRPCreatedEvent) Key() string { + return event.DesiredLrp.GetProcessGuid() +} + +func NewDesiredLRPChangedEvent(before, after *DesiredLRP, traceId string) *DesiredLRPChangedEvent { + return &DesiredLRPChangedEvent{ + Before: before, + After: after, + TraceId: traceId, + } +} + +func (event *DesiredLRPChangedEvent) EventType() string { + return EventTypeDesiredLRPChanged +} + +func (event *DesiredLRPChangedEvent) Key() string { + return event.Before.GetProcessGuid() +} + +func NewDesiredLRPRemovedEvent(desiredLRP *DesiredLRP, traceId string) *DesiredLRPRemovedEvent { + return &DesiredLRPRemovedEvent{ + DesiredLrp: desiredLRP, + TraceId: traceId, + } +} + +func (event *DesiredLRPRemovedEvent) EventType() string { + return EventTypeDesiredLRPRemoved +} + +func (event DesiredLRPRemovedEvent) Key() string { + return event.DesiredLrp.GetProcessGuid() +} + +// FIXME: change the signature +func NewActualLRPInstanceChangedEvent(before, after *ActualLRP, traceId string) *ActualLRPInstanceChangedEvent { + var ( + actualLRPKey ActualLRPKey + actualLRPInstanceKey ActualLRPInstanceKey + ) + + if (before != nil && before.ActualLRPKey != ActualLRPKey{}) { + actualLRPKey = before.ActualLRPKey + } + if (after != nil && after.ActualLRPKey != ActualLRPKey{}) { + actualLRPKey = after.ActualLRPKey + } + + if (before != nil && before.ActualLRPInstanceKey != ActualLRPInstanceKey{}) { + actualLRPInstanceKey = before.ActualLRPInstanceKey + } + if (after != nil && after.ActualLRPInstanceKey != ActualLRPInstanceKey{}) { + actualLRPInstanceKey = after.ActualLRPInstanceKey + } + + return &ActualLRPInstanceChangedEvent{ + ActualLRPKey: actualLRPKey, + ActualLRPInstanceKey: actualLRPInstanceKey, + Before: before.ToActualLRPInfo(), + After: after.ToActualLRPInfo(), + TraceId: traceId, + } +} + +func (event *ActualLRPInstanceChangedEvent) EventType() string { + return EventTypeActualLRPInstanceChanged +} + +func (event *ActualLRPInstanceChangedEvent) Key() string { + return event.GetInstanceGuid() +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func NewActualLRPChangedEvent(before, after *ActualLRPGroup) *ActualLRPChangedEvent { + return &ActualLRPChangedEvent{ + Before: before, + After: after, + } +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPChangedEvent) EventType() string { + return EventTypeActualLRPChanged +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPChangedEvent) Key() string { + actualLRP, _, resolveError := event.Before.Resolve() + if resolveError != nil { + return "" + } + return actualLRP.GetInstanceGuid() +} + +func NewActualLRPCrashedEvent(before, after *ActualLRP) *ActualLRPCrashedEvent { + return 
&ActualLRPCrashedEvent{ + ActualLRPKey: after.ActualLRPKey, + ActualLRPInstanceKey: before.ActualLRPInstanceKey, + CrashCount: after.CrashCount, + CrashReason: after.CrashReason, + Since: after.Since, + } +} + +func (event *ActualLRPCrashedEvent) EventType() string { + return EventTypeActualLRPCrashed +} + +func (event *ActualLRPCrashedEvent) Key() string { + return event.ActualLRPInstanceKey.InstanceGuid +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func NewActualLRPRemovedEvent(actualLRPGroup *ActualLRPGroup) *ActualLRPRemovedEvent { + return &ActualLRPRemovedEvent{ + ActualLrpGroup: actualLRPGroup, + } +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPRemovedEvent) EventType() string { + return EventTypeActualLRPRemoved +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPRemovedEvent) Key() string { + actualLRP, _, resolveError := event.ActualLrpGroup.Resolve() + if resolveError != nil { + return "" + } + return actualLRP.GetInstanceGuid() +} + +func NewActualLRPInstanceRemovedEvent(actualLrp *ActualLRP, traceId string) *ActualLRPInstanceRemovedEvent { + return &ActualLRPInstanceRemovedEvent{ + ActualLrp: actualLrp, + TraceId: traceId, + } +} + +func (event *ActualLRPInstanceRemovedEvent) EventType() string { + return EventTypeActualLRPInstanceRemoved +} + +func (event *ActualLRPInstanceRemovedEvent) Key() string { + if event.ActualLrp == nil { + return "" + } + return event.ActualLrp.GetInstanceGuid() +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func NewActualLRPCreatedEvent(actualLRPGroup *ActualLRPGroup) *ActualLRPCreatedEvent { + return &ActualLRPCreatedEvent{ + ActualLrpGroup: actualLRPGroup, + } +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPCreatedEvent) EventType() string { + return EventTypeActualLRPCreated +} + +// Deprecated: use the ActualLRPInstance versions of this instead +func (event *ActualLRPCreatedEvent) Key() string { + actualLRP, _, resolveError := event.ActualLrpGroup.Resolve() + if resolveError != nil { + return "" + } + return actualLRP.GetInstanceGuid() +} + +func NewActualLRPInstanceCreatedEvent(actualLrp *ActualLRP, traceId string) *ActualLRPInstanceCreatedEvent { + return &ActualLRPInstanceCreatedEvent{ + ActualLrp: actualLrp, + TraceId: traceId, + } +} + +func (event *ActualLRPInstanceCreatedEvent) EventType() string { + return EventTypeActualLRPInstanceCreated +} + +func (event *ActualLRPInstanceCreatedEvent) Key() string { + if event.ActualLrp == nil { + return "" + } + return event.ActualLrp.GetInstanceGuid() +} + +func (request *EventsByCellId) Validate() error { + return nil +} + +func NewTaskCreatedEvent(task *Task) *TaskCreatedEvent { + return &TaskCreatedEvent{ + Task: task, + } +} + +func (event *TaskCreatedEvent) EventType() string { + return EventTypeTaskCreated +} + +func (event *TaskCreatedEvent) Key() string { + return event.Task.GetTaskGuid() +} + +func NewTaskChangedEvent(before, after *Task) *TaskChangedEvent { + return &TaskChangedEvent{ + Before: before, + After: after, + } +} + +func (event *TaskChangedEvent) EventType() string { + return EventTypeTaskChanged +} + +func (event *TaskChangedEvent) Key() string { + return event.Before.GetTaskGuid() +} + +func NewTaskRemovedEvent(task *Task) *TaskRemovedEvent { + return &TaskRemovedEvent{ + Task: task, + } +} + +func (event *TaskRemovedEvent) EventType() string { + return EventTypeTaskRemoved +} + +func 
(event TaskRemovedEvent) Key() string { + return event.Task.GetTaskGuid() +} + +func (info *ActualLRPInfo) SetRoutable(routable bool) { + info.OptionalRoutable = &ActualLRPInfo_Routable{ + Routable: routable, + } +} + +func (info *ActualLRPInfo) RoutableExists() bool { + _, ok := info.GetOptionalRoutable().(*ActualLRPInfo_Routable) + return ok +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/events.pb.go b/vendor/code.cloudfoundry.org/bbs/models/events.pb.go new file mode 100644 index 00000000..1716470b --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/events.pb.go @@ -0,0 +1,4977 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: events.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Deprecated: Do not use. +type ActualLRPCreatedEvent struct { + ActualLrpGroup *ActualLRPGroup `protobuf:"bytes,1,opt,name=actual_lrp_group,json=actualLrpGroup,proto3" json:"actual_lrp_group,omitempty"` +} + +func (m *ActualLRPCreatedEvent) Reset() { *m = ActualLRPCreatedEvent{} } +func (*ActualLRPCreatedEvent) ProtoMessage() {} +func (*ActualLRPCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{0} +} +func (m *ActualLRPCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPCreatedEvent.Merge(m, src) +} +func (m *ActualLRPCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPCreatedEvent proto.InternalMessageInfo + +func (m *ActualLRPCreatedEvent) GetActualLrpGroup() *ActualLRPGroup { + if m != nil { + return m.ActualLrpGroup + } + return nil +} + +// Deprecated: Do not use. 
+type ActualLRPChangedEvent struct { + Before *ActualLRPGroup `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *ActualLRPGroup `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` +} + +func (m *ActualLRPChangedEvent) Reset() { *m = ActualLRPChangedEvent{} } +func (*ActualLRPChangedEvent) ProtoMessage() {} +func (*ActualLRPChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{1} +} +func (m *ActualLRPChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPChangedEvent.Merge(m, src) +} +func (m *ActualLRPChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPChangedEvent proto.InternalMessageInfo + +func (m *ActualLRPChangedEvent) GetBefore() *ActualLRPGroup { + if m != nil { + return m.Before + } + return nil +} + +func (m *ActualLRPChangedEvent) GetAfter() *ActualLRPGroup { + if m != nil { + return m.After + } + return nil +} + +// Deprecated: Do not use. +type ActualLRPRemovedEvent struct { + ActualLrpGroup *ActualLRPGroup `protobuf:"bytes,1,opt,name=actual_lrp_group,json=actualLrpGroup,proto3" json:"actual_lrp_group,omitempty"` +} + +func (m *ActualLRPRemovedEvent) Reset() { *m = ActualLRPRemovedEvent{} } +func (*ActualLRPRemovedEvent) ProtoMessage() {} +func (*ActualLRPRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{2} +} +func (m *ActualLRPRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPRemovedEvent.Merge(m, src) +} +func (m *ActualLRPRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPRemovedEvent proto.InternalMessageInfo + +func (m *ActualLRPRemovedEvent) GetActualLrpGroup() *ActualLRPGroup { + if m != nil { + return m.ActualLrpGroup + } + return nil +} + +type ActualLRPInstanceCreatedEvent struct { + ActualLrp *ActualLRP `protobuf:"bytes,1,opt,name=actual_lrp,json=actualLrp,proto3" json:"actual_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *ActualLRPInstanceCreatedEvent) Reset() { *m = ActualLRPInstanceCreatedEvent{} } +func (*ActualLRPInstanceCreatedEvent) ProtoMessage() {} +func (*ActualLRPInstanceCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{3} +} +func (m *ActualLRPInstanceCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceCreatedEvent) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceCreatedEvent.Merge(m, src) +} +func (m *ActualLRPInstanceCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceCreatedEvent proto.InternalMessageInfo + +func (m *ActualLRPInstanceCreatedEvent) GetActualLrp() *ActualLRP { + if m != nil { + return m.ActualLrp + } + return nil +} + +func (m *ActualLRPInstanceCreatedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type ActualLRPInfo struct { + ActualLRPNetInfo `protobuf:"bytes,3,opt,name=actual_lrp_net_info,json=actualLrpNetInfo,proto3,embedded=actual_lrp_net_info" json:""` + CrashCount int32 `protobuf:"varint,4,opt,name=crash_count,json=crashCount,proto3" json:"crash_count"` + CrashReason string `protobuf:"bytes,5,opt,name=crash_reason,json=crashReason,proto3" json:"crash_reason,omitempty"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state"` + PlacementError string `protobuf:"bytes,7,opt,name=placement_error,json=placementError,proto3" json:"placement_error,omitempty"` + Since int64 `protobuf:"varint,8,opt,name=since,proto3" json:"since"` + ModificationTag ModificationTag `protobuf:"bytes,9,opt,name=modification_tag,json=modificationTag,proto3" json:"modification_tag"` + Presence ActualLRP_Presence `protobuf:"varint,10,opt,name=presence,proto3,enum=models.ActualLRP_Presence" json:"presence"` + // Types that are valid to be assigned to OptionalRoutable: + // *ActualLRPInfo_Routable + OptionalRoutable isActualLRPInfo_OptionalRoutable `protobuf_oneof:"optional_routable"` + AvailabilityZone string `protobuf:"bytes,12,opt,name=availability_zone,json=availabilityZone,proto3" json:"availability_zone"` +} + +func (m *ActualLRPInfo) Reset() { *m = ActualLRPInfo{} } +func (*ActualLRPInfo) ProtoMessage() {} +func (*ActualLRPInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{4} +} +func (m *ActualLRPInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInfo.Merge(m, src) +} +func (m *ActualLRPInfo) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInfo proto.InternalMessageInfo + +type isActualLRPInfo_OptionalRoutable interface { + isActualLRPInfo_OptionalRoutable() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type ActualLRPInfo_Routable struct { + Routable bool `protobuf:"varint,11,opt,name=Routable,proto3,oneof" json:"Routable,omitempty"` +} + +func (*ActualLRPInfo_Routable) isActualLRPInfo_OptionalRoutable() {} + +func (m *ActualLRPInfo) 
GetOptionalRoutable() isActualLRPInfo_OptionalRoutable { + if m != nil { + return m.OptionalRoutable + } + return nil +} + +func (m *ActualLRPInfo) GetCrashCount() int32 { + if m != nil { + return m.CrashCount + } + return 0 +} + +func (m *ActualLRPInfo) GetCrashReason() string { + if m != nil { + return m.CrashReason + } + return "" +} + +func (m *ActualLRPInfo) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *ActualLRPInfo) GetPlacementError() string { + if m != nil { + return m.PlacementError + } + return "" +} + +func (m *ActualLRPInfo) GetSince() int64 { + if m != nil { + return m.Since + } + return 0 +} + +func (m *ActualLRPInfo) GetModificationTag() ModificationTag { + if m != nil { + return m.ModificationTag + } + return ModificationTag{} +} + +func (m *ActualLRPInfo) GetPresence() ActualLRP_Presence { + if m != nil { + return m.Presence + } + return ActualLRP_Ordinary +} + +func (m *ActualLRPInfo) GetRoutable() bool { + if x, ok := m.GetOptionalRoutable().(*ActualLRPInfo_Routable); ok { + return x.Routable + } + return false +} + +func (m *ActualLRPInfo) GetAvailabilityZone() string { + if m != nil { + return m.AvailabilityZone + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ActualLRPInfo) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ActualLRPInfo_Routable)(nil), + } +} + +type ActualLRPInstanceChangedEvent struct { + ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3,embedded=actual_lrp_key" json:""` + ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3,embedded=actual_lrp_instance_key" json:""` + Before *ActualLRPInfo `protobuf:"bytes,3,opt,name=before,proto3" json:"before,omitempty"` + After *ActualLRPInfo `protobuf:"bytes,4,opt,name=after,proto3" json:"after,omitempty"` + TraceId string `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *ActualLRPInstanceChangedEvent) Reset() { *m = ActualLRPInstanceChangedEvent{} } +func (*ActualLRPInstanceChangedEvent) ProtoMessage() {} +func (*ActualLRPInstanceChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{5} +} +func (m *ActualLRPInstanceChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceChangedEvent.Merge(m, src) +} +func (m *ActualLRPInstanceChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceChangedEvent proto.InternalMessageInfo + +func (m *ActualLRPInstanceChangedEvent) GetBefore() *ActualLRPInfo { + if m != nil { + return m.Before + } + return nil +} + +func (m *ActualLRPInstanceChangedEvent) GetAfter() *ActualLRPInfo { + if m != nil { + return m.After + } + return nil +} + +func (m *ActualLRPInstanceChangedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type ActualLRPInstanceRemovedEvent struct { + 
ActualLrp *ActualLRP `protobuf:"bytes,1,opt,name=actual_lrp,json=actualLrp,proto3" json:"actual_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *ActualLRPInstanceRemovedEvent) Reset() { *m = ActualLRPInstanceRemovedEvent{} } +func (*ActualLRPInstanceRemovedEvent) ProtoMessage() {} +func (*ActualLRPInstanceRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{6} +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPInstanceRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPInstanceRemovedEvent.Merge(m, src) +} +func (m *ActualLRPInstanceRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPInstanceRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPInstanceRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPInstanceRemovedEvent proto.InternalMessageInfo + +func (m *ActualLRPInstanceRemovedEvent) GetActualLrp() *ActualLRP { + if m != nil { + return m.ActualLrp + } + return nil +} + +func (m *ActualLRPInstanceRemovedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type DesiredLRPCreatedEvent struct { + DesiredLrp *DesiredLRP `protobuf:"bytes,1,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *DesiredLRPCreatedEvent) Reset() { *m = DesiredLRPCreatedEvent{} } +func (*DesiredLRPCreatedEvent) ProtoMessage() {} +func (*DesiredLRPCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{7} +} +func (m *DesiredLRPCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPCreatedEvent.Merge(m, src) +} +func (m *DesiredLRPCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPCreatedEvent proto.InternalMessageInfo + +func (m *DesiredLRPCreatedEvent) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +func (m *DesiredLRPCreatedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type DesiredLRPChangedEvent struct { + Before *DesiredLRP `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *DesiredLRP `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` + TraceId string `protobuf:"bytes,3,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *DesiredLRPChangedEvent) Reset() { *m = DesiredLRPChangedEvent{} } +func (*DesiredLRPChangedEvent) 
ProtoMessage() {} +func (*DesiredLRPChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{8} +} +func (m *DesiredLRPChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPChangedEvent.Merge(m, src) +} +func (m *DesiredLRPChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPChangedEvent proto.InternalMessageInfo + +func (m *DesiredLRPChangedEvent) GetBefore() *DesiredLRP { + if m != nil { + return m.Before + } + return nil +} + +func (m *DesiredLRPChangedEvent) GetAfter() *DesiredLRP { + if m != nil { + return m.After + } + return nil +} + +func (m *DesiredLRPChangedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type DesiredLRPRemovedEvent struct { + DesiredLrp *DesiredLRP `protobuf:"bytes,1,opt,name=desired_lrp,json=desiredLrp,proto3" json:"desired_lrp,omitempty"` + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId,proto3" json:"trace_id"` +} + +func (m *DesiredLRPRemovedEvent) Reset() { *m = DesiredLRPRemovedEvent{} } +func (*DesiredLRPRemovedEvent) ProtoMessage() {} +func (*DesiredLRPRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{9} +} +func (m *DesiredLRPRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesiredLRPRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesiredLRPRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesiredLRPRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesiredLRPRemovedEvent.Merge(m, src) +} +func (m *DesiredLRPRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *DesiredLRPRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DesiredLRPRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DesiredLRPRemovedEvent proto.InternalMessageInfo + +func (m *DesiredLRPRemovedEvent) GetDesiredLrp() *DesiredLRP { + if m != nil { + return m.DesiredLrp + } + return nil +} + +func (m *DesiredLRPRemovedEvent) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +type ActualLRPCrashedEvent struct { + ActualLRPKey `protobuf:"bytes,1,opt,name=actual_lrp_key,json=actualLrpKey,proto3,embedded=actual_lrp_key" json:""` + ActualLRPInstanceKey `protobuf:"bytes,2,opt,name=actual_lrp_instance_key,json=actualLrpInstanceKey,proto3,embedded=actual_lrp_instance_key" json:""` + CrashCount int32 `protobuf:"varint,3,opt,name=crash_count,json=crashCount,proto3" json:"crash_count"` + CrashReason string `protobuf:"bytes,4,opt,name=crash_reason,json=crashReason,proto3" json:"crash_reason,omitempty"` + Since int64 `protobuf:"varint,5,opt,name=since,proto3" json:"since"` +} + +func (m *ActualLRPCrashedEvent) Reset() { *m = ActualLRPCrashedEvent{} } +func (*ActualLRPCrashedEvent) ProtoMessage() {} +func 
(*ActualLRPCrashedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{10} +} +func (m *ActualLRPCrashedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ActualLRPCrashedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ActualLRPCrashedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ActualLRPCrashedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ActualLRPCrashedEvent.Merge(m, src) +} +func (m *ActualLRPCrashedEvent) XXX_Size() int { + return m.Size() +} +func (m *ActualLRPCrashedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ActualLRPCrashedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ActualLRPCrashedEvent proto.InternalMessageInfo + +func (m *ActualLRPCrashedEvent) GetCrashCount() int32 { + if m != nil { + return m.CrashCount + } + return 0 +} + +func (m *ActualLRPCrashedEvent) GetCrashReason() string { + if m != nil { + return m.CrashReason + } + return "" +} + +func (m *ActualLRPCrashedEvent) GetSince() int64 { + if m != nil { + return m.Since + } + return 0 +} + +type EventsByCellId struct { + CellId string `protobuf:"bytes,1,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *EventsByCellId) Reset() { *m = EventsByCellId{} } +func (*EventsByCellId) ProtoMessage() {} +func (*EventsByCellId) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{11} +} +func (m *EventsByCellId) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventsByCellId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventsByCellId.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventsByCellId) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventsByCellId.Merge(m, src) +} +func (m *EventsByCellId) XXX_Size() int { + return m.Size() +} +func (m *EventsByCellId) XXX_DiscardUnknown() { + xxx_messageInfo_EventsByCellId.DiscardUnknown(m) +} + +var xxx_messageInfo_EventsByCellId proto.InternalMessageInfo + +func (m *EventsByCellId) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type TaskCreatedEvent struct { + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *TaskCreatedEvent) Reset() { *m = TaskCreatedEvent{} } +func (*TaskCreatedEvent) ProtoMessage() {} +func (*TaskCreatedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{12} +} +func (m *TaskCreatedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskCreatedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskCreatedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskCreatedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskCreatedEvent.Merge(m, src) +} +func (m *TaskCreatedEvent) XXX_Size() int { + return m.Size() +} +func (m *TaskCreatedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TaskCreatedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskCreatedEvent proto.InternalMessageInfo + +func (m *TaskCreatedEvent) GetTask() *Task { + if 
m != nil { + return m.Task + } + return nil +} + +type TaskChangedEvent struct { + Before *Task `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *Task `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` +} + +func (m *TaskChangedEvent) Reset() { *m = TaskChangedEvent{} } +func (*TaskChangedEvent) ProtoMessage() {} +func (*TaskChangedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{13} +} +func (m *TaskChangedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskChangedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskChangedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskChangedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskChangedEvent.Merge(m, src) +} +func (m *TaskChangedEvent) XXX_Size() int { + return m.Size() +} +func (m *TaskChangedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TaskChangedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskChangedEvent proto.InternalMessageInfo + +func (m *TaskChangedEvent) GetBefore() *Task { + if m != nil { + return m.Before + } + return nil +} + +func (m *TaskChangedEvent) GetAfter() *Task { + if m != nil { + return m.After + } + return nil +} + +type TaskRemovedEvent struct { + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *TaskRemovedEvent) Reset() { *m = TaskRemovedEvent{} } +func (*TaskRemovedEvent) ProtoMessage() {} +func (*TaskRemovedEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8f22242cb04491f9, []int{14} +} +func (m *TaskRemovedEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskRemovedEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskRemovedEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskRemovedEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskRemovedEvent.Merge(m, src) +} +func (m *TaskRemovedEvent) XXX_Size() int { + return m.Size() +} +func (m *TaskRemovedEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TaskRemovedEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskRemovedEvent proto.InternalMessageInfo + +func (m *TaskRemovedEvent) GetTask() *Task { + if m != nil { + return m.Task + } + return nil +} + +func init() { + proto.RegisterType((*ActualLRPCreatedEvent)(nil), "models.ActualLRPCreatedEvent") + proto.RegisterType((*ActualLRPChangedEvent)(nil), "models.ActualLRPChangedEvent") + proto.RegisterType((*ActualLRPRemovedEvent)(nil), "models.ActualLRPRemovedEvent") + proto.RegisterType((*ActualLRPInstanceCreatedEvent)(nil), "models.ActualLRPInstanceCreatedEvent") + proto.RegisterType((*ActualLRPInfo)(nil), "models.ActualLRPInfo") + proto.RegisterType((*ActualLRPInstanceChangedEvent)(nil), "models.ActualLRPInstanceChangedEvent") + proto.RegisterType((*ActualLRPInstanceRemovedEvent)(nil), "models.ActualLRPInstanceRemovedEvent") + proto.RegisterType((*DesiredLRPCreatedEvent)(nil), "models.DesiredLRPCreatedEvent") + proto.RegisterType((*DesiredLRPChangedEvent)(nil), "models.DesiredLRPChangedEvent") + proto.RegisterType((*DesiredLRPRemovedEvent)(nil), "models.DesiredLRPRemovedEvent") + 
proto.RegisterType((*ActualLRPCrashedEvent)(nil), "models.ActualLRPCrashedEvent") + proto.RegisterType((*EventsByCellId)(nil), "models.EventsByCellId") + proto.RegisterType((*TaskCreatedEvent)(nil), "models.TaskCreatedEvent") + proto.RegisterType((*TaskChangedEvent)(nil), "models.TaskChangedEvent") + proto.RegisterType((*TaskRemovedEvent)(nil), "models.TaskRemovedEvent") +} + +func init() { proto.RegisterFile("events.proto", fileDescriptor_8f22242cb04491f9) } + +var fileDescriptor_8f22242cb04491f9 = []byte{ + // 913 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x4f, 0x6f, 0xdb, 0x36, + 0x14, 0x17, 0x13, 0xdb, 0x71, 0x9e, 0x3d, 0xc7, 0x61, 0x9b, 0x54, 0x08, 0x3a, 0xc9, 0x33, 0x0a, + 0xd4, 0xd8, 0x56, 0xb7, 0x68, 0x8b, 0x1d, 0x76, 0xda, 0x9c, 0x16, 0x6b, 0xd0, 0x6e, 0x28, 0x88, + 0xee, 0x32, 0x74, 0x10, 0x68, 0x99, 0x76, 0x84, 0xc8, 0xa2, 0x21, 0xd1, 0x01, 0xdc, 0xd3, 0x3e, + 0xc2, 0x6e, 0xfb, 0x0a, 0xfb, 0x0c, 0xbb, 0xed, 0xd6, 0x63, 0x76, 0xeb, 0x49, 0x58, 0x9c, 0xcb, + 0xe0, 0x53, 0x3f, 0xc2, 0x20, 0x52, 0x52, 0x29, 0xdb, 0x48, 0x57, 0x60, 0x39, 0xf4, 0x64, 0xf2, + 0xbd, 0x1f, 0x7f, 0xef, 0x0f, 0x1f, 0x7f, 0x32, 0xd4, 0xd9, 0x29, 0x0b, 0x44, 0xd4, 0x9d, 0x84, + 0x5c, 0x70, 0x5c, 0x19, 0xf3, 0x01, 0xf3, 0xa3, 0x83, 0x3b, 0x23, 0x4f, 0x1c, 0x4f, 0xfb, 0x5d, + 0x97, 0x8f, 0xef, 0x8e, 0xf8, 0x88, 0xdf, 0x95, 0xee, 0xfe, 0x74, 0x28, 0x77, 0x72, 0x23, 0x57, + 0xea, 0xd8, 0x41, 0x93, 0xba, 0x62, 0x4a, 0x7d, 0xc7, 0x0f, 0x27, 0xa9, 0x65, 0x77, 0xc0, 0x22, + 0x2f, 0x64, 0x03, 0xcd, 0x04, 0x82, 0x46, 0x27, 0xe9, 0x7a, 0x7f, 0xcc, 0x07, 0xde, 0xd0, 0x73, + 0xa9, 0xf0, 0x78, 0xe0, 0x08, 0x3a, 0x52, 0xf6, 0xf6, 0xcf, 0xb0, 0xf7, 0xad, 0xa4, 0x7a, 0x46, + 0x9e, 0x1f, 0x86, 0x8c, 0x0a, 0x36, 0x78, 0x9c, 0xe4, 0x87, 0xbf, 0x01, 0x2d, 0x86, 0x33, 0x0a, + 0xf9, 0x74, 0x62, 0xa2, 0x16, 0xea, 0xd4, 0xee, 0xef, 0x77, 0x55, 0xce, 0xdd, 0xfc, 0xe0, 0x77, + 0x89, 0x97, 0x34, 0x14, 0xfe, 0x59, 0x38, 0x91, 0xfb, 0xaf, 0x37, 0x4c, 0xd4, 0x9e, 0xe9, 0xf4, + 0xc7, 0x34, 0x18, 0x65, 0xf4, 0x5d, 0xa8, 0xf4, 0xd9, 0x90, 0x87, 0xec, 0x3d, 0xa4, 0x29, 0x0a, + 0x7f, 0x09, 0x65, 0x3a, 0x14, 0x2c, 0x34, 0x37, 0x2e, 0x85, 0x2b, 0x90, 0x0c, 0xad, 0x57, 0x46, + 0xd8, 0x98, 0x9f, 0xfe, 0xbf, 0x95, 0xbd, 0x82, 0x4f, 0x73, 0xd4, 0x51, 0x10, 0x09, 0x1a, 0xb8, + 0xac, 0xd0, 0xc0, 0x7b, 0x00, 0xef, 0xc2, 0xa4, 0x01, 0x76, 0x57, 0x02, 0x90, 0xed, 0x9c, 0x1b, + 0xdf, 0x86, 0xaa, 0x08, 0xa9, 0xcb, 0x1c, 0x6f, 0x20, 0xcb, 0xdc, 0xee, 0xd5, 0x17, 0xb1, 0x9d, + 0xdb, 0xc8, 0x96, 0x5c, 0x1d, 0x0d, 0xda, 0x7f, 0x96, 0xe0, 0x13, 0x2d, 0xf8, 0x90, 0xe3, 0x1f, + 0xe1, 0x9a, 0x56, 0x53, 0xc0, 0x84, 0xe3, 0x05, 0x43, 0x6e, 0x6e, 0xca, 0xa8, 0xe6, 0x4a, 0xd4, + 0x1f, 0x98, 0x48, 0x8e, 0xf5, 0xea, 0xaf, 0x63, 0xdb, 0x38, 0x8b, 0x6d, 0xb4, 0x88, 0x6d, 0x83, + 0x34, 0xf3, 0x54, 0x52, 0x3f, 0xbe, 0x07, 0x35, 0x37, 0xa4, 0xd1, 0xb1, 0xe3, 0xf2, 0x69, 0x20, + 0xcc, 0x52, 0x0b, 0x75, 0xca, 0xbd, 0x9d, 0x45, 0x6c, 0xeb, 0x66, 0x02, 0x72, 0x73, 0x98, 0xac, + 0xf1, 0x67, 0x50, 0x57, 0xae, 0x90, 0xd1, 0x88, 0x07, 0x66, 0x39, 0xa9, 0x83, 0x28, 0x38, 0x91, + 0x26, 0x6c, 0x43, 0x39, 0x12, 0x54, 0x30, 0xb3, 0x22, 0x6b, 0xdc, 0x5e, 0xc4, 0xb6, 0x32, 0x10, + 0xf5, 0x83, 0x6f, 0xc3, 0xce, 0xc4, 0xa7, 0x2e, 0x1b, 0xb3, 0x40, 0x38, 0x2c, 0x0c, 0x79, 0x68, + 0x6e, 0x49, 0x9a, 0x46, 0x6e, 0x7e, 0x9c, 0x58, 0x25, 0x93, 0x17, 0xb8, 0xcc, 0xac, 0xb6, 0x50, + 0x67, 0x33, 0x65, 0x4a, 0x0c, 0x44, 0xfd, 0xe0, 0x97, 0xd0, 0x5c, 0x9e, 0x7b, 0x73, 0x5b, 0xf6, + 0xe4, 0x46, 0xd6, 0x93, 0xef, 0x35, 0xff, 0x0b, 
0x3a, 0xea, 0x99, 0x49, 0x4b, 0x16, 0xb1, 0xbd, + 0x72, 0x90, 0xec, 0x8c, 0x8b, 0x50, 0xfc, 0x08, 0xaa, 0x93, 0x90, 0x45, 0x2c, 0xc9, 0x00, 0x5a, + 0xa8, 0xd3, 0xb8, 0x7f, 0xb0, 0xd2, 0xe9, 0xee, 0xf3, 0x14, 0xa1, 0xee, 0x32, 0xc3, 0x93, 0x7c, + 0x85, 0x6f, 0x42, 0x95, 0xf0, 0xa9, 0xa0, 0x7d, 0x9f, 0x99, 0xb5, 0x16, 0xea, 0x54, 0x9f, 0x18, + 0x24, 0xb7, 0xe0, 0x1e, 0xec, 0xd2, 0x53, 0xea, 0xf9, 0xb4, 0xef, 0xf9, 0x9e, 0x98, 0x39, 0xaf, + 0x78, 0xc0, 0xcc, 0xba, 0x6c, 0xdc, 0xde, 0x22, 0xb6, 0x57, 0x9d, 0xa4, 0xa9, 0x9b, 0x7e, 0xe2, + 0x01, 0xeb, 0x5d, 0x83, 0x5d, 0x3e, 0x49, 0x92, 0xa6, 0xbe, 0x13, 0xa6, 0xc4, 0xed, 0xbf, 0x36, + 0xd6, 0x0d, 0xb0, 0xfe, 0x44, 0x9f, 0x40, 0x43, 0x9b, 0xa9, 0x13, 0x36, 0x4b, 0x87, 0xf8, 0xfa, + 0x4a, 0x91, 0x4f, 0xd9, 0x6c, 0x69, 0x94, 0xea, 0xf9, 0x28, 0x3d, 0x65, 0x33, 0x4c, 0xe1, 0x86, + 0xc6, 0xe4, 0xa5, 0xc1, 0x24, 0xa5, 0x7a, 0xce, 0x37, 0x57, 0x28, 0xb3, 0x8c, 0x56, 0xa9, 0xaf, + 0xe7, 0xd4, 0x1a, 0x06, 0xdf, 0xc9, 0xf5, 0x44, 0xcd, 0xfc, 0xde, 0x1a, 0xc6, 0x21, 0xcf, 0xe5, + 0xe4, 0x8b, 0x4c, 0x4e, 0x4a, 0x97, 0xa1, 0x15, 0xa6, 0xf0, 0x2e, 0xcb, 0x97, 0xbd, 0xcb, 0x75, + 0x9a, 0x50, 0x90, 0x9e, 0x2b, 0xd4, 0x84, 0x53, 0xd8, 0x7f, 0xa4, 0xbe, 0x00, 0xcb, 0x4a, 0xfe, + 0x00, 0x6a, 0xda, 0xb7, 0x21, 0x8d, 0x8a, 0xb3, 0xa8, 0xef, 0x0e, 0x11, 0x48, 0x61, 0x1f, 0x14, + 0xf7, 0x37, 0x54, 0x08, 0xac, 0x0f, 0xd0, 0xe7, 0x4b, 0x1a, 0xbf, 0x2e, 0x66, 0x76, 0x21, 0x9d, + 0xa2, 0xbe, 0xaf, 0x83, 0xae, 0xb9, 0x8d, 0xcd, 0xff, 0xdc, 0x91, 0xc2, 0x35, 0x5c, 0x6d, 0x47, + 0xfe, 0xd8, 0x28, 0x7c, 0x53, 0x69, 0x74, 0xfc, 0x51, 0xbe, 0xa8, 0x25, 0xed, 0xdf, 0xfc, 0x70, + 0xed, 0x2f, 0xad, 0xd7, 0x7e, 0xa9, 0xd8, 0xe5, 0xf5, 0x8a, 0xdd, 0xfe, 0x0a, 0x1a, 0xb2, 0x57, + 0x51, 0x6f, 0x76, 0xc8, 0x7c, 0xff, 0x68, 0x80, 0x6f, 0xc1, 0x96, 0xcb, 0x7c, 0x3f, 0x69, 0x3b, + 0x92, 0x6d, 0xaf, 0x2d, 0x62, 0x3b, 0x33, 0x91, 0x8a, 0x2b, 0x51, 0xed, 0x87, 0xd0, 0x7c, 0x41, + 0xa3, 0x93, 0xc2, 0xe0, 0xb7, 0xa0, 0x94, 0xfc, 0x03, 0x4a, 0x9b, 0x5c, 0xcf, 0x3a, 0x92, 0xe0, + 0x88, 0xf4, 0xb4, 0x5f, 0xa6, 0xa7, 0xf4, 0xa9, 0xbd, 0xb5, 0x34, 0xb5, 0xc5, 0x73, 0xd9, 0xbc, + 0xb6, 0x8b, 0xf3, 0x5a, 0x04, 0x29, 0x57, 0x96, 0x53, 0x61, 0xf4, 0xde, 0x9b, 0x53, 0xef, 0xe1, + 0xd9, 0xb9, 0x65, 0xbc, 0x39, 0xb7, 0x8c, 0xb7, 0xe7, 0x16, 0xfa, 0x65, 0x6e, 0xa1, 0xdf, 0xe7, + 0x16, 0x7a, 0x3d, 0xb7, 0xd0, 0xd9, 0xdc, 0x42, 0x7f, 0xcf, 0x2d, 0xf4, 0xcf, 0xdc, 0x32, 0xde, + 0xce, 0x2d, 0xf4, 0xeb, 0x85, 0x65, 0x9c, 0x5d, 0x58, 0xc6, 0x9b, 0x0b, 0xcb, 0xe8, 0x57, 0xe4, + 0xdf, 0xb9, 0x07, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x15, 0xfc, 0xcc, 0x44, 0x5e, 0x0a, 0x00, + 0x00, +} + +func (this *ActualLRPCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPCreatedEvent) + if !ok { + that2, ok := that.(ActualLRPCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpGroup.Equal(that1.ActualLrpGroup) { + return false + } + return true +} +func (this *ActualLRPChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPChangedEvent) + if !ok { + that2, ok := that.(ActualLRPChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + return true +} 
+func (this *ActualLRPRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPRemovedEvent) + if !ok { + that2, ok := that.(ActualLRPRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrpGroup.Equal(that1.ActualLrpGroup) { + return false + } + return true +} +func (this *ActualLRPInstanceCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInstanceCreatedEvent) + if !ok { + that2, ok := that.(ActualLRPInstanceCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrp.Equal(that1.ActualLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *ActualLRPInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInfo) + if !ok { + that2, ok := that.(ActualLRPInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPNetInfo.Equal(&that1.ActualLRPNetInfo) { + return false + } + if this.CrashCount != that1.CrashCount { + return false + } + if this.CrashReason != that1.CrashReason { + return false + } + if this.State != that1.State { + return false + } + if this.PlacementError != that1.PlacementError { + return false + } + if this.Since != that1.Since { + return false + } + if !this.ModificationTag.Equal(&that1.ModificationTag) { + return false + } + if this.Presence != that1.Presence { + return false + } + if that1.OptionalRoutable == nil { + if this.OptionalRoutable != nil { + return false + } + } else if this.OptionalRoutable == nil { + return false + } else if !this.OptionalRoutable.Equal(that1.OptionalRoutable) { + return false + } + if this.AvailabilityZone != that1.AvailabilityZone { + return false + } + return true +} +func (this *ActualLRPInfo_Routable) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInfo_Routable) + if !ok { + that2, ok := that.(ActualLRPInfo_Routable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Routable != that1.Routable { + return false + } + return true +} +func (this *ActualLRPInstanceChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInstanceChangedEvent) + if !ok { + that2, ok := that.(ActualLRPInstanceChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPKey.Equal(&that1.ActualLRPKey) { + return false + } + if !this.ActualLRPInstanceKey.Equal(&that1.ActualLRPInstanceKey) { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *ActualLRPInstanceRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPInstanceRemovedEvent) + if !ok { + that2, ok := 
that.(ActualLRPInstanceRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLrp.Equal(that1.ActualLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *DesiredLRPCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPCreatedEvent) + if !ok { + that2, ok := that.(DesiredLRPCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *DesiredLRPChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPChangedEvent) + if !ok { + that2, ok := that.(DesiredLRPChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *DesiredLRPRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesiredLRPRemovedEvent) + if !ok { + that2, ok := that.(DesiredLRPRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DesiredLrp.Equal(that1.DesiredLrp) { + return false + } + if this.TraceId != that1.TraceId { + return false + } + return true +} +func (this *ActualLRPCrashedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ActualLRPCrashedEvent) + if !ok { + that2, ok := that.(ActualLRPCrashedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ActualLRPKey.Equal(&that1.ActualLRPKey) { + return false + } + if !this.ActualLRPInstanceKey.Equal(&that1.ActualLRPInstanceKey) { + return false + } + if this.CrashCount != that1.CrashCount { + return false + } + if this.CrashReason != that1.CrashReason { + return false + } + if this.Since != that1.Since { + return false + } + return true +} +func (this *EventsByCellId) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EventsByCellId) + if !ok { + that2, ok := that.(EventsByCellId) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *TaskCreatedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskCreatedEvent) + if !ok { + that2, ok := that.(TaskCreatedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *TaskChangedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := 
that.(*TaskChangedEvent) + if !ok { + that2, ok := that.(TaskChangedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Before.Equal(that1.Before) { + return false + } + if !this.After.Equal(that1.After) { + return false + } + return true +} +func (this *TaskRemovedEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskRemovedEvent) + if !ok { + that2, ok := that.(TaskRemovedEvent) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *ActualLRPCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPCreatedEvent{") + if this.ActualLrpGroup != nil { + s = append(s, "ActualLrpGroup: "+fmt.Sprintf("%#v", this.ActualLrpGroup)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPChangedEvent{") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.ActualLRPRemovedEvent{") + if this.ActualLrpGroup != nil { + s = append(s, "ActualLrpGroup: "+fmt.Sprintf("%#v", this.ActualLrpGroup)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInstanceCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPInstanceCreatedEvent{") + if this.ActualLrp != nil { + s = append(s, "ActualLrp: "+fmt.Sprintf("%#v", this.ActualLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&models.ActualLRPInfo{") + s = append(s, "ActualLRPNetInfo: "+strings.Replace(this.ActualLRPNetInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CrashCount: "+fmt.Sprintf("%#v", this.CrashCount)+",\n") + s = append(s, "CrashReason: "+fmt.Sprintf("%#v", this.CrashReason)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "PlacementError: "+fmt.Sprintf("%#v", this.PlacementError)+",\n") + s = append(s, "Since: "+fmt.Sprintf("%#v", this.Since)+",\n") + s = append(s, "ModificationTag: "+strings.Replace(this.ModificationTag.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Presence: "+fmt.Sprintf("%#v", this.Presence)+",\n") + if this.OptionalRoutable != nil { + s = append(s, "OptionalRoutable: "+fmt.Sprintf("%#v", this.OptionalRoutable)+",\n") + } + s = append(s, "AvailabilityZone: "+fmt.Sprintf("%#v", this.AvailabilityZone)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInfo_Routable) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&models.ActualLRPInfo_Routable{` + + `Routable:` + fmt.Sprintf("%#v", 
this.Routable) + `}`}, ", ") + return s +} +func (this *ActualLRPInstanceChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.ActualLRPInstanceChangedEvent{") + s = append(s, "ActualLRPKey: "+strings.Replace(this.ActualLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPInstanceKey: "+strings.Replace(this.ActualLRPInstanceKey.GoString(), `&`, ``, 1)+",\n") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPInstanceRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ActualLRPInstanceRemovedEvent{") + if this.ActualLrp != nil { + s = append(s, "ActualLrp: "+fmt.Sprintf("%#v", this.ActualLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPCreatedEvent{") + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.DesiredLRPChangedEvent{") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesiredLRPRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.DesiredLRPRemovedEvent{") + if this.DesiredLrp != nil { + s = append(s, "DesiredLrp: "+fmt.Sprintf("%#v", this.DesiredLrp)+",\n") + } + s = append(s, "TraceId: "+fmt.Sprintf("%#v", this.TraceId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActualLRPCrashedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.ActualLRPCrashedEvent{") + s = append(s, "ActualLRPKey: "+strings.Replace(this.ActualLRPKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "ActualLRPInstanceKey: "+strings.Replace(this.ActualLRPInstanceKey.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CrashCount: "+fmt.Sprintf("%#v", this.CrashCount)+",\n") + s = append(s, "CrashReason: "+fmt.Sprintf("%#v", this.CrashReason)+",\n") + s = append(s, "Since: "+fmt.Sprintf("%#v", this.Since)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EventsByCellId) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.EventsByCellId{") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskCreatedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskCreatedEvent{") + if 
this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskChangedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TaskChangedEvent{") + if this.Before != nil { + s = append(s, "Before: "+fmt.Sprintf("%#v", this.Before)+",\n") + } + if this.After != nil { + s = append(s, "After: "+fmt.Sprintf("%#v", this.After)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskRemovedEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskRemovedEvent{") + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEvents(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ActualLRPCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpGroup != nil { + { + size, err := m.ActualLrpGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActualLrpGroup != nil { + { + size, err := m.ActualLrpGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *ActualLRPInstanceCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.ActualLrp != nil { + { + size, err := m.ActualLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AvailabilityZone) > 0 { + i -= len(m.AvailabilityZone) + copy(dAtA[i:], m.AvailabilityZone) + i = encodeVarintEvents(dAtA, i, uint64(len(m.AvailabilityZone))) + i-- + dAtA[i] = 0x62 + } + if m.OptionalRoutable != nil { + { + size := m.OptionalRoutable.Size() + i -= size + if _, err := m.OptionalRoutable.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Presence != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Presence)) + i-- + dAtA[i] = 0x50 + } + { + size, err := m.ModificationTag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if m.Since != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x40 + } + if len(m.PlacementError) > 0 { + i -= len(m.PlacementError) + copy(dAtA[i:], m.PlacementError) + i = encodeVarintEvents(dAtA, i, uint64(len(m.PlacementError))) + i-- + dAtA[i] = 0x3a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintEvents(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.CrashReason) > 0 { + i -= len(m.CrashReason) + copy(dAtA[i:], m.CrashReason) + i = encodeVarintEvents(dAtA, i, uint64(len(m.CrashReason))) + i-- + dAtA[i] = 0x2a + } + if m.CrashCount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.CrashCount)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.ActualLRPNetInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} + +func (m *ActualLRPInfo_Routable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInfo_Routable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Routable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + return len(dAtA) - i, nil +} +func (m *ActualLRPInstanceChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x2a + } + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ActualLRPInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ActualLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ActualLRPInstanceRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPInstanceRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPInstanceRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.ActualLrp != nil { + { + size, err := m.ActualLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x1a + } + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesiredLRPRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DesiredLRPRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesiredLRPRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x12 + } + if m.DesiredLrp != nil { + { + size, err := m.DesiredLrp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ActualLRPCrashedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActualLRPCrashedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActualLRPCrashedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Since != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Since)) + i-- + dAtA[i] = 0x28 + } + if len(m.CrashReason) > 0 { + i -= len(m.CrashReason) + copy(dAtA[i:], m.CrashReason) + i = encodeVarintEvents(dAtA, i, uint64(len(m.CrashReason))) + i-- + dAtA[i] = 0x22 + } + if m.CrashCount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.CrashCount)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.ActualLRPInstanceKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ActualLRPKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventsByCellId) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventsByCellId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventsByCellId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + 
copy(dAtA[i:], m.CellId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskCreatedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCreatedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskCreatedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskChangedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskChangedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskChangedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.After != nil { + { + size, err := m.After.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Before != nil { + { + size, err := m.Before.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskRemovedEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskRemovedEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskRemovedEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ActualLRPCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpGroup != nil { + l = m.ActualLrpGroup.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrpGroup != nil { + l = m.ActualLrpGroup.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInstanceCreatedEvent) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.ActualLrp != nil { + l = m.ActualLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPNetInfo.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.CrashCount != 0 { + n += 1 + sovEvents(uint64(m.CrashCount)) + } + l = len(m.CrashReason) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.PlacementError) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Since != 0 { + n += 1 + sovEvents(uint64(m.Since)) + } + l = m.ModificationTag.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.Presence != 0 { + n += 1 + sovEvents(uint64(m.Presence)) + } + if m.OptionalRoutable != nil { + n += m.OptionalRoutable.Size() + } + l = len(m.AvailabilityZone) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInfo_Routable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ActualLRPInstanceChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPKey.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ActualLRPInstanceKey.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPInstanceRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActualLrp != nil { + l = m.ActualLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *DesiredLRPCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *DesiredLRPChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *DesiredLRPRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DesiredLrp != nil { + l = m.DesiredLrp.Size() + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ActualLRPCrashedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ActualLRPKey.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ActualLRPInstanceKey.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.CrashCount != 0 { + n += 1 + sovEvents(uint64(m.CrashCount)) + } + l = len(m.CrashReason) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Since != 0 { + n += 1 + sovEvents(uint64(m.Since)) + } + return n +} + +func (m *EventsByCellId) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m 
*TaskCreatedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *TaskChangedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Before != nil { + l = m.Before.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.After != nil { + l = m.After.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *TaskRemovedEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ActualLRPCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPCreatedEvent{`, + `ActualLrpGroup:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpGroup), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPChangedEvent{`, + `Before:` + strings.Replace(fmt.Sprintf("%v", this.Before), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `After:` + strings.Replace(fmt.Sprintf("%v", this.After), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPRemovedEvent{`, + `ActualLrpGroup:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrpGroup), "ActualLRPGroup", "ActualLRPGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceCreatedEvent{`, + `ActualLrp:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrp), "ActualLRP", "ActualLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInfo{`, + `ActualLRPNetInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPNetInfo), "ActualLRPNetInfo", "ActualLRPNetInfo", 1), `&`, ``, 1) + `,`, + `CrashCount:` + fmt.Sprintf("%v", this.CrashCount) + `,`, + `CrashReason:` + fmt.Sprintf("%v", this.CrashReason) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `PlacementError:` + fmt.Sprintf("%v", this.PlacementError) + `,`, + `Since:` + fmt.Sprintf("%v", this.Since) + `,`, + `ModificationTag:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ModificationTag), "ModificationTag", "ModificationTag", 1), `&`, ``, 1) + `,`, + `Presence:` + fmt.Sprintf("%v", this.Presence) + `,`, + `OptionalRoutable:` + fmt.Sprintf("%v", this.OptionalRoutable) + `,`, + `AvailabilityZone:` + fmt.Sprintf("%v", this.AvailabilityZone) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInfo_Routable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInfo_Routable{`, + `Routable:` + fmt.Sprintf("%v", this.Routable) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceChangedEvent) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ActualLRPInstanceChangedEvent{`, + `ActualLRPKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPKey), "ActualLRPKey", "ActualLRPKey", 1), `&`, ``, 1) + `,`, + `ActualLRPInstanceKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1), `&`, ``, 1) + `,`, + `Before:` + strings.Replace(this.Before.String(), "ActualLRPInfo", "ActualLRPInfo", 1) + `,`, + `After:` + strings.Replace(this.After.String(), "ActualLRPInfo", "ActualLRPInfo", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPInstanceRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPInstanceRemovedEvent{`, + `ActualLrp:` + strings.Replace(fmt.Sprintf("%v", this.ActualLrp), "ActualLRP", "ActualLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPCreatedEvent{`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPChangedEvent{`, + `Before:` + strings.Replace(fmt.Sprintf("%v", this.Before), "DesiredLRP", "DesiredLRP", 1) + `,`, + `After:` + strings.Replace(fmt.Sprintf("%v", this.After), "DesiredLRP", "DesiredLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *DesiredLRPRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesiredLRPRemovedEvent{`, + `DesiredLrp:` + strings.Replace(fmt.Sprintf("%v", this.DesiredLrp), "DesiredLRP", "DesiredLRP", 1) + `,`, + `TraceId:` + fmt.Sprintf("%v", this.TraceId) + `,`, + `}`, + }, "") + return s +} +func (this *ActualLRPCrashedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ActualLRPCrashedEvent{`, + `ActualLRPKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPKey), "ActualLRPKey", "ActualLRPKey", 1), `&`, ``, 1) + `,`, + `ActualLRPInstanceKey:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActualLRPInstanceKey), "ActualLRPInstanceKey", "ActualLRPInstanceKey", 1), `&`, ``, 1) + `,`, + `CrashCount:` + fmt.Sprintf("%v", this.CrashCount) + `,`, + `CrashReason:` + fmt.Sprintf("%v", this.CrashReason) + `,`, + `Since:` + fmt.Sprintf("%v", this.Since) + `,`, + `}`, + }, "") + return s +} +func (this *EventsByCellId) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventsByCellId{`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *TaskCreatedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCreatedEvent{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskChangedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskChangedEvent{`, + `Before:` + strings.Replace(fmt.Sprintf("%v", this.Before), "Task", "Task", 1) + `,`, + `After:` + strings.Replace(fmt.Sprintf("%v", this.After), "Task", 
"Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskRemovedEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskRemovedEvent{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringEvents(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ActualLRPCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpGroup == nil { + m.ActualLrpGroup = &ActualLRPGroup{} + } + if err := m.ActualLrpGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ if m.Before == nil { + m.Before = &ActualLRPGroup{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &ActualLRPGroup{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrpGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrpGroup == nil { + m.ActualLrpGroup = &ActualLRPGroup{} + } + if err := m.ActualLrpGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ActualLRPInstanceCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrp == nil { + m.ActualLrp = &ActualLRP{} + } + if err := m.ActualLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPNetInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPNetInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashCount", wireType) + } + m.CrashCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CrashCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrashReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Presence", wireType) + } + m.Presence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Presence |= ActualLRP_Presence(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Routable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalRoutable = &ActualLRPInfo_Routable{b} + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &ActualLRPInfo{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &ActualLRPInfo{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPInstanceRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPInstanceRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPInstanceRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ActualLrp == nil { + m.ActualLrp = &ActualLRP{} + } + if err := m.ActualLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &DesiredLRP{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &DesiredLRP{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesiredLRPRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesiredLRPRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesiredLRPRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredLrp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DesiredLrp == nil { + m.DesiredLrp = &DesiredLRP{} + } + if err := m.DesiredLrp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ActualLRPCrashedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ActualLRPCrashedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ActualLRPCrashedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualLRPInstanceKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ActualLRPInstanceKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashCount", wireType) + } + m.CrashCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CrashCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrashReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrashReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventsByCellId) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventsByCellId: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventsByCellId: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskCreatedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCreatedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCreatedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskChangedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskChangedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskChangedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Before == nil { + m.Before = &Task{} + } + if err := m.Before.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.After == nil { + m.After = &Task{} + } + if err := m.After.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskRemovedEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskRemovedEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskRemovedEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/events.proto b/vendor/code.cloudfoundry.org/bbs/models/events.proto new file mode 100644 index 00000000..c6bf8ecc --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/events.proto @@ -0,0 +1,99 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actual_lrp.proto"; +import "desired_lrp.proto"; +import "task.proto"; +import "modification_tag.proto"; + +message ActualLRPCreatedEvent { + option deprecated = true; + ActualLRPGroup actual_lrp_group = 1; +} + +message ActualLRPChangedEvent { + option deprecated = true; + ActualLRPGroup before = 1; + ActualLRPGroup after = 2; +} + +message ActualLRPRemovedEvent { + option deprecated = true; + ActualLRPGroup actual_lrp_group = 1; +} + +message ActualLRPInstanceCreatedEvent { + ActualLRP actual_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message ActualLRPInfo { + ActualLRPNetInfo actual_lrp_net_info = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + int32 crash_count = 4 [(gogoproto.jsontag) = "crash_count"]; + string crash_reason = 5; + string state = 6 [(gogoproto.jsontag) = "state"]; + string placement_error = 7; + int64 since = 8 [(gogoproto.jsontag) = "since"]; + ModificationTag modification_tag = 9 [(gogoproto.nullable) = false,(gogoproto.jsontag) = "modification_tag"]; + ActualLRP.Presence presence = 10 [(gogoproto.jsontag) = "presence"]; + oneof optional_routable { + bool Routable = 11; + } + string availability_zone = 12 [(gogoproto.jsontag) = "availability_zone"]; +} + +message ActualLRPInstanceChangedEvent { + ActualLRPKey actual_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInstanceKey actual_lrp_instance_key = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInfo before = 3; + ActualLRPInfo after = 4; + string trace_id = 5 [(gogoproto.jsontag) = "trace_id"]; +} + +message ActualLRPInstanceRemovedEvent { + ActualLRP actual_lrp = 1; + string 
trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message DesiredLRPCreatedEvent { + DesiredLRP desired_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message DesiredLRPChangedEvent { + DesiredLRP before = 1; + DesiredLRP after = 2; + string trace_id = 3 [(gogoproto.jsontag) = "trace_id"]; +} + +message DesiredLRPRemovedEvent { + DesiredLRP desired_lrp = 1; + string trace_id = 2 [(gogoproto.jsontag) = "trace_id"]; +} + +message ActualLRPCrashedEvent { + ActualLRPKey actual_lrp_key = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + ActualLRPInstanceKey actual_lrp_instance_key = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "", (gogoproto.embed) = true]; + int32 crash_count = 3 [(gogoproto.jsontag) = "crash_count"]; + string crash_reason = 4; + int64 since = 5 [(gogoproto.jsontag) = "since"]; +} + +message EventsByCellId { + string cell_id = 1 [(gogoproto.jsontag) = "cell_id"]; +} + +message TaskCreatedEvent { + Task task = 1; +} + +message TaskChangedEvent { + Task before = 1; + Task after = 2; +} + +message TaskRemovedEvent { + Task task = 1; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/image_layer.go b/vendor/code.cloudfoundry.org/bbs/models/image_layer.go new file mode 100644 index 00000000..6a62e561 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/image_layer.go @@ -0,0 +1,228 @@ +package models + +import ( + "encoding/json" + "fmt" + strings "strings" +) + +func (l *ImageLayer) Validate() error { + var validationError ValidationError + + if l.GetUrl() == "" { + validationError = validationError.Append(ErrInvalidField{"url"}) + } + + if l.GetDestinationPath() == "" { + validationError = validationError.Append(ErrInvalidField{"destination_path"}) + } + + if !l.LayerType.Valid() { + validationError = validationError.Append(ErrInvalidField{"layer_type"}) + } + + if !l.MediaType.Valid() { + validationError = validationError.Append(ErrInvalidField{"media_type"}) + } + + if (l.DigestValue != "" || l.LayerType == LayerTypeExclusive) && l.DigestAlgorithm == DigestAlgorithmInvalid { + validationError = validationError.Append(ErrInvalidField{"digest_algorithm"}) + } + + if (l.DigestAlgorithm != DigestAlgorithmInvalid || l.LayerType == LayerTypeExclusive) && l.DigestValue == "" { + validationError = validationError.Append(ErrInvalidField{"digest_value"}) + } + + if l.DigestValue != "" && !l.DigestAlgorithm.Valid() { + validationError = validationError.Append(ErrInvalidField{"digest_algorithm"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func validateImageLayers(layers []*ImageLayer, legacyDownloadUser string) ValidationError { + var validationError ValidationError + + requiresLegacyDownloadUser := false + if len(layers) > 0 { + for _, layer := range layers { + err := layer.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"image_layer"}) + validationError = validationError.Append(err) + } + + if layer.LayerType == LayerTypeExclusive { + requiresLegacyDownloadUser = true + } + } + } + + if requiresLegacyDownloadUser && legacyDownloadUser == "" { + validationError = validationError.Append(ErrInvalidField{"legacy_download_user"}) + } + + return validationError +} + +type ImageLayers []*ImageLayer + +func (layers ImageLayers) FilterByType(layerType ImageLayer_Type) ImageLayers { + var filtered ImageLayers + + for _, layer := range layers { + if layer.GetLayerType() == layerType { + filtered = append(filtered, layer) + } 
+ } + return filtered +} + +func (layers ImageLayers) ToDownloadActions(legacyDownloadUser string, existingAction *Action) *Action { + downloadActions := []ActionInterface{} + + for _, layer := range layers.FilterByType(LayerTypeExclusive) { + digestAlgorithmName := strings.ToLower(layer.DigestAlgorithm.String()) + downloadActions = append(downloadActions, &DownloadAction{ + Artifact: layer.Name, + From: layer.Url, + To: layer.DestinationPath, + CacheKey: digestAlgorithmName + ":" + layer.DigestValue, // digest required for exclusive layers + User: legacyDownloadUser, + ChecksumAlgorithm: digestAlgorithmName, + ChecksumValue: layer.DigestValue, + }) + } + + var action *Action + if len(downloadActions) > 0 { + parallelDownloadActions := Parallel(downloadActions...) + if existingAction != nil { + action = WrapAction(Serial(parallelDownloadActions, UnwrapAction(existingAction))) + } else { + action = WrapAction(parallelDownloadActions) + } + } else { + action = existingAction + } + + return action +} + +func (layers ImageLayers) ToCachedDependencies() []*CachedDependency { + cachedDependencies := []*CachedDependency{} + for _, layer := range layers.FilterByType(LayerTypeShared) { + c := &CachedDependency{ + Name: layer.Name, + From: layer.Url, + To: layer.DestinationPath, + ChecksumValue: layer.DigestValue, + } + + if layer.DigestAlgorithm == DigestAlgorithmInvalid { + c.ChecksumAlgorithm = "" + } else { + c.ChecksumAlgorithm = strings.ToLower(layer.DigestAlgorithm.String()) + } + + if layer.DigestValue == "" { + c.CacheKey = layer.Url + } else { + c.CacheKey = c.ChecksumAlgorithm + ":" + layer.DigestValue + } + + cachedDependencies = append(cachedDependencies, c) + } + + return cachedDependencies +} + +func (d ImageLayer_DigestAlgorithm) Valid() bool { + switch d { + case DigestAlgorithmSha256: + return true + case DigestAlgorithmSha512: + return true + default: + return false + } +} + +func (m ImageLayer_MediaType) Valid() bool { + switch m { + case MediaTypeTar: + return true + case MediaTypeTgz: + return true + case MediaTypeZip: + return true + default: + return false + } +} + +func (t ImageLayer_Type) Valid() bool { + switch t { + case LayerTypeExclusive: + return true + case LayerTypeShared: + return true + default: + return false + } +} + +func (d *ImageLayer_DigestAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ImageLayer_DigestAlgorithm_value[name]; found { + *d = ImageLayer_DigestAlgorithm(v) + return nil + } + return fmt.Errorf("invalid digest_algorithm: %s", name) +} + +func (d ImageLayer_DigestAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +func (m *ImageLayer_MediaType) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ImageLayer_MediaType_value[name]; found { + *m = ImageLayer_MediaType(v) + return nil + } + return fmt.Errorf("invalid media_type: %s", name) +} + +func (m ImageLayer_MediaType) MarshalJSON() ([]byte, error) { + return json.Marshal(m.String()) +} + +func (t *ImageLayer_Type) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := ImageLayer_Type_value[name]; found { + *t = ImageLayer_Type(v) + return nil + } + return fmt.Errorf("invalid type: %s", name) +} + +func (t ImageLayer_Type) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) 
+} diff --git a/vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go b/vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go new file mode 100644 index 00000000..8dda5974 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/image_layer.pb.go @@ -0,0 +1,788 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: image_layer.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ImageLayer_DigestAlgorithm int32 + +const ( + DigestAlgorithmInvalid ImageLayer_DigestAlgorithm = 0 + DigestAlgorithmSha256 ImageLayer_DigestAlgorithm = 1 + DigestAlgorithmSha512 ImageLayer_DigestAlgorithm = 2 // Deprecated: Do not use. +) + +var ImageLayer_DigestAlgorithm_name = map[int32]string{ + 0: "DigestAlgorithmInvalid", + 1: "SHA256", + 2: "SHA512", +} + +var ImageLayer_DigestAlgorithm_value = map[string]int32{ + "DigestAlgorithmInvalid": 0, + "SHA256": 1, + "SHA512": 2, +} + +func (ImageLayer_DigestAlgorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0, 0} +} + +type ImageLayer_MediaType int32 + +const ( + MediaTypeInvalid ImageLayer_MediaType = 0 + MediaTypeTgz ImageLayer_MediaType = 1 + MediaTypeTar ImageLayer_MediaType = 2 + MediaTypeZip ImageLayer_MediaType = 3 +) + +var ImageLayer_MediaType_name = map[int32]string{ + 0: "MediaTypeInvalid", + 1: "TGZ", + 2: "TAR", + 3: "ZIP", +} + +var ImageLayer_MediaType_value = map[string]int32{ + "MediaTypeInvalid": 0, + "TGZ": 1, + "TAR": 2, + "ZIP": 3, +} + +func (ImageLayer_MediaType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0, 1} +} + +type ImageLayer_Type int32 + +const ( + LayerTypeInvalid ImageLayer_Type = 0 + LayerTypeShared ImageLayer_Type = 1 + LayerTypeExclusive ImageLayer_Type = 2 +) + +var ImageLayer_Type_name = map[int32]string{ + 0: "LayerTypeInvalid", + 1: "SHARED", + 2: "EXCLUSIVE", +} + +var ImageLayer_Type_value = map[string]int32{ + "LayerTypeInvalid": 0, + "SHARED": 1, + "EXCLUSIVE": 2, +} + +func (ImageLayer_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0, 2} +} + +type ImageLayer struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url"` + DestinationPath string `protobuf:"bytes,3,opt,name=destination_path,json=destinationPath,proto3" json:"destination_path"` + LayerType ImageLayer_Type `protobuf:"varint,4,opt,name=layer_type,json=layerType,proto3,enum=models.ImageLayer_Type" json:"layer_type"` + MediaType ImageLayer_MediaType `protobuf:"varint,5,opt,name=media_type,json=mediaType,proto3,enum=models.ImageLayer_MediaType" json:"media_type"` + DigestAlgorithm ImageLayer_DigestAlgorithm `protobuf:"varint,6,opt,name=digest_algorithm,json=digestAlgorithm,proto3,enum=models.ImageLayer_DigestAlgorithm" json:"digest_algorithm,omitempty"` + DigestValue string 
`protobuf:"bytes,7,opt,name=digest_value,json=digestValue,proto3" json:"digest_value,omitempty"` +} + +func (m *ImageLayer) Reset() { *m = ImageLayer{} } +func (*ImageLayer) ProtoMessage() {} +func (*ImageLayer) Descriptor() ([]byte, []int) { + return fileDescriptor_c089288d9f3c46a0, []int{0} +} +func (m *ImageLayer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageLayer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageLayer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayer.Merge(m, src) +} +func (m *ImageLayer) XXX_Size() int { + return m.Size() +} +func (m *ImageLayer) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayer.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayer proto.InternalMessageInfo + +func (m *ImageLayer) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ImageLayer) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ImageLayer) GetDestinationPath() string { + if m != nil { + return m.DestinationPath + } + return "" +} + +func (m *ImageLayer) GetLayerType() ImageLayer_Type { + if m != nil { + return m.LayerType + } + return LayerTypeInvalid +} + +func (m *ImageLayer) GetMediaType() ImageLayer_MediaType { + if m != nil { + return m.MediaType + } + return MediaTypeInvalid +} + +func (m *ImageLayer) GetDigestAlgorithm() ImageLayer_DigestAlgorithm { + if m != nil { + return m.DigestAlgorithm + } + return DigestAlgorithmInvalid +} + +func (m *ImageLayer) GetDigestValue() string { + if m != nil { + return m.DigestValue + } + return "" +} + +func init() { + proto.RegisterEnum("models.ImageLayer_DigestAlgorithm", ImageLayer_DigestAlgorithm_name, ImageLayer_DigestAlgorithm_value) + proto.RegisterEnum("models.ImageLayer_MediaType", ImageLayer_MediaType_name, ImageLayer_MediaType_value) + proto.RegisterEnum("models.ImageLayer_Type", ImageLayer_Type_name, ImageLayer_Type_value) + proto.RegisterType((*ImageLayer)(nil), "models.ImageLayer") +} + +func init() { proto.RegisterFile("image_layer.proto", fileDescriptor_c089288d9f3c46a0) } + +var fileDescriptor_c089288d9f3c46a0 = []byte{ + // 533 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0x41, 0x6f, 0x12, 0x41, + 0x14, 0xc7, 0x77, 0xa0, 0xa5, 0xf2, 0x6c, 0xca, 0x38, 0xd6, 0xba, 0xac, 0x66, 0x58, 0x49, 0x1a, + 0x7b, 0x91, 0xa6, 0x28, 0xbd, 0x1a, 0xb0, 0xa8, 0x44, 0x9a, 0x34, 0x0b, 0x36, 0x86, 0x0b, 0x19, + 0xba, 0xe3, 0xee, 0x26, 0xbb, 0x2c, 0x59, 0x16, 0x22, 0x26, 0x26, 0x9e, 0x39, 0xf9, 0x05, 0xb8, + 0xfb, 0x51, 0x3c, 0x72, 0xec, 0xc1, 0x10, 0x59, 0x2e, 0x86, 0x53, 0x3f, 0x82, 0xd9, 0x41, 0xa0, + 0xae, 0x5c, 0x36, 0xef, 0xfd, 0xff, 0xbf, 0xf9, 0xbf, 0x99, 0xd9, 0x0c, 0xdc, 0xb3, 0x1c, 0x66, + 0xf0, 0xa6, 0xcd, 0x06, 0xdc, 0xcb, 0x75, 0x3c, 0xd7, 0x77, 0x49, 0xc2, 0x71, 0x75, 0x6e, 0x77, + 0x95, 0x67, 0x86, 0xe5, 0x9b, 0xbd, 0x56, 0xee, 0xca, 0x75, 0x8e, 0x0d, 0xd7, 0x70, 0x8f, 0x85, + 0xdd, 0xea, 0x7d, 0x14, 0x9d, 0x68, 0x44, 0xb5, 0x58, 0x96, 0xfd, 0xb9, 0x0d, 0x50, 0x09, 0xc3, + 0xaa, 0x61, 0x16, 0x21, 0xb0, 0xd5, 0x66, 0x0e, 0x97, 0x91, 0x8a, 0x8e, 0x92, 0x9a, 0xa8, 0x49, + 0x1a, 0xe2, 0x3d, 0xcf, 0x96, 0x63, 0xa1, 0x54, 0xda, 0x99, 0x4f, 0x32, 0x61, 0xab, 0x85, 0x1f, + 0xf2, 0x12, 0xb0, 0xce, 0xbb, 0xbe, 0xd5, 0x66, 0xbe, 0xe5, 
0xb6, 0x9b, 0x1d, 0xe6, 0x9b, 0x72, + 0x5c, 0x70, 0xfb, 0xf3, 0x49, 0xe6, 0x3f, 0x4f, 0x4b, 0xdd, 0x52, 0x2e, 0x98, 0x6f, 0x92, 0xd7, + 0x00, 0xe2, 0x10, 0x4d, 0x7f, 0xd0, 0xe1, 0xf2, 0x96, 0x8a, 0x8e, 0xf6, 0xf2, 0x0f, 0x73, 0x8b, + 0xa3, 0xe4, 0xd6, 0xfb, 0xca, 0xd5, 0x07, 0x1d, 0x5e, 0xda, 0x9b, 0x4f, 0x32, 0xb7, 0x70, 0x2d, + 0x29, 0xea, 0xd0, 0x22, 0xef, 0x00, 0x1c, 0xae, 0x5b, 0x6c, 0x91, 0xb3, 0x2d, 0x72, 0x1e, 0x6f, + 0xc8, 0x39, 0x0f, 0xa1, 0x75, 0xd8, 0x7a, 0x8d, 0x96, 0x74, 0x96, 0x16, 0x39, 0x07, 0xac, 0x5b, + 0x06, 0xef, 0xfa, 0x4d, 0x66, 0x1b, 0xae, 0x67, 0xf9, 0xa6, 0x23, 0x27, 0x44, 0x64, 0x76, 0x43, + 0xe4, 0x99, 0x40, 0x8b, 0x4b, 0x52, 0x4b, 0xe9, 0xff, 0x0a, 0xe4, 0x09, 0xec, 0xfe, 0x8d, 0xeb, + 0x33, 0xbb, 0xc7, 0xe5, 0x1d, 0x71, 0xb7, 0x77, 0x17, 0xda, 0x65, 0x28, 0x65, 0xbf, 0x40, 0x2a, + 0x12, 0x43, 0x14, 0x38, 0x88, 0x48, 0x95, 0x76, 0x9f, 0xd9, 0x96, 0x8e, 0x25, 0x72, 0x08, 0x89, + 0xda, 0xdb, 0x62, 0xbe, 0x70, 0x8a, 0x91, 0x92, 0x1e, 0x8e, 0xd4, 0x07, 0x11, 0xb2, 0x66, 0xb2, + 0x7c, 0xe1, 0x94, 0x3c, 0x15, 0x58, 0xe1, 0x24, 0x8f, 0x63, 0xca, 0xa3, 0xcd, 0x58, 0xe1, 0x24, + 0x7f, 0x07, 0x65, 0x3d, 0x48, 0xae, 0x2e, 0x86, 0xec, 0x03, 0x5e, 0x35, 0xeb, 0x91, 0x69, 0x88, + 0xd7, 0xdf, 0x34, 0x30, 0x52, 0xf0, 0x70, 0xa4, 0xee, 0xae, 0x80, 0xba, 0xf1, 0x59, 0x58, 0x45, + 0x0d, 0xc7, 0xa2, 0x16, 0xf3, 0x42, 0xab, 0x51, 0xb9, 0xc0, 0xf1, 0x88, 0xd5, 0xb0, 0x3a, 0x59, + 0x1d, 0xb6, 0x96, 0xe3, 0xaa, 0xcb, 0xdf, 0xb8, 0x1e, 0x97, 0x11, 0x5b, 0xd7, 0xca, 0x67, 0x18, + 0x29, 0xf7, 0x87, 0x23, 0x35, 0xb5, 0x62, 0x6a, 0x26, 0xf3, 0xb8, 0x4e, 0x0e, 0x21, 0x59, 0xfe, + 0xf0, 0xaa, 0xfa, 0xbe, 0x56, 0xb9, 0x2c, 0xe3, 0x98, 0x72, 0x30, 0x1c, 0xa9, 0x64, 0xc5, 0x94, + 0x3f, 0x5d, 0xd9, 0xbd, 0xae, 0xd5, 0xe7, 0xa5, 0x17, 0xe3, 0x29, 0x95, 0xae, 0xa7, 0x54, 0xba, + 0x99, 0x52, 0xf4, 0x35, 0xa0, 0xe8, 0x7b, 0x40, 0xd1, 0x8f, 0x80, 0xa2, 0x71, 0x40, 0xd1, 0xaf, + 0x80, 0xa2, 0xdf, 0x01, 0x95, 0x6e, 0x02, 0x8a, 0xbe, 0xcd, 0xa8, 0x34, 0x9e, 0x51, 0xe9, 0x7a, + 0x46, 0xa5, 0x56, 0x42, 0xbc, 0x8d, 0xe7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x90, 0xc9, + 0x42, 0x67, 0x03, 0x00, 0x00, +} + +func (x ImageLayer_DigestAlgorithm) String() string { + s, ok := ImageLayer_DigestAlgorithm_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ImageLayer_MediaType) String() string { + s, ok := ImageLayer_MediaType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ImageLayer_Type) String() string { + s, ok := ImageLayer_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *ImageLayer) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ImageLayer) + if !ok { + that2, ok := that.(ImageLayer) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Url != that1.Url { + return false + } + if this.DestinationPath != that1.DestinationPath { + return false + } + if this.LayerType != that1.LayerType { + return false + } + if this.MediaType != that1.MediaType { + return false + } + if this.DigestAlgorithm != that1.DigestAlgorithm { + return false + } + if this.DigestValue != that1.DigestValue { + return false + } + return true +} +func (this *ImageLayer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.ImageLayer{") + s = append(s, "Name: 
"+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Url: "+fmt.Sprintf("%#v", this.Url)+",\n") + s = append(s, "DestinationPath: "+fmt.Sprintf("%#v", this.DestinationPath)+",\n") + s = append(s, "LayerType: "+fmt.Sprintf("%#v", this.LayerType)+",\n") + s = append(s, "MediaType: "+fmt.Sprintf("%#v", this.MediaType)+",\n") + s = append(s, "DigestAlgorithm: "+fmt.Sprintf("%#v", this.DigestAlgorithm)+",\n") + s = append(s, "DigestValue: "+fmt.Sprintf("%#v", this.DigestValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringImageLayer(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ImageLayer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DigestValue) > 0 { + i -= len(m.DigestValue) + copy(dAtA[i:], m.DigestValue) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.DigestValue))) + i-- + dAtA[i] = 0x3a + } + if m.DigestAlgorithm != 0 { + i = encodeVarintImageLayer(dAtA, i, uint64(m.DigestAlgorithm)) + i-- + dAtA[i] = 0x30 + } + if m.MediaType != 0 { + i = encodeVarintImageLayer(dAtA, i, uint64(m.MediaType)) + i-- + dAtA[i] = 0x28 + } + if m.LayerType != 0 { + i = encodeVarintImageLayer(dAtA, i, uint64(m.LayerType)) + i-- + dAtA[i] = 0x20 + } + if len(m.DestinationPath) > 0 { + i -= len(m.DestinationPath) + copy(dAtA[i:], m.DestinationPath) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.DestinationPath))) + i-- + dAtA[i] = 0x1a + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintImageLayer(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintImageLayer(dAtA []byte, offset int, v uint64) int { + offset -= sovImageLayer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ImageLayer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + l = len(m.DestinationPath) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + if m.LayerType != 0 { + n += 1 + sovImageLayer(uint64(m.LayerType)) + } + if m.MediaType != 0 { + n += 1 + sovImageLayer(uint64(m.MediaType)) + } + if m.DigestAlgorithm != 0 { + n += 1 + sovImageLayer(uint64(m.DigestAlgorithm)) + } + l = len(m.DigestValue) + if l > 0 { + n += 1 + l + sovImageLayer(uint64(l)) + } + return n +} + +func sovImageLayer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozImageLayer(x uint64) (n int) { + return sovImageLayer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageLayer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayer{`, + `Name:` + 
fmt.Sprintf("%v", this.Name) + `,`, + `Url:` + fmt.Sprintf("%v", this.Url) + `,`, + `DestinationPath:` + fmt.Sprintf("%v", this.DestinationPath) + `,`, + `LayerType:` + fmt.Sprintf("%v", this.LayerType) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `DigestAlgorithm:` + fmt.Sprintf("%v", this.DigestAlgorithm) + `,`, + `DigestValue:` + fmt.Sprintf("%v", this.DigestValue) + `,`, + `}`, + }, "") + return s +} +func valueToStringImageLayer(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageLayer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerType", wireType) + } + m.LayerType = 0 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerType |= ImageLayer_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + m.MediaType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MediaType |= ImageLayer_MediaType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DigestAlgorithm", wireType) + } + m.DigestAlgorithm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DigestAlgorithm |= ImageLayer_DigestAlgorithm(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DigestValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImageLayer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImageLayer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthImageLayer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DigestValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipImageLayer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthImageLayer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipImageLayer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImageLayer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImageLayer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImageLayer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthImageLayer + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupImageLayer + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthImageLayer + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthImageLayer = fmt.Errorf("proto: negative length found 
during unmarshaling") + ErrIntOverflowImageLayer = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupImageLayer = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/image_layer.proto b/vendor/code.cloudfoundry.org/bbs/models/image_layer.proto new file mode 100644 index 00000000..07b64913 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/image_layer.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message ImageLayer { + enum DigestAlgorithm { + DigestAlgorithmInvalid = 0; // not camel cased since it isn't supposed to be used by API users + SHA256 = 1 [(gogoproto.enumvalue_customname) = "DigestAlgorithmSha256"]; + SHA512 = 2 [(gogoproto.enumvalue_customname) = "DigestAlgorithmSha512", deprecated=true]; + } + + enum MediaType { + MediaTypeInvalid = 0; // not camel cased since it isn't supposed to be used by API users + TGZ = 1 [(gogoproto.enumvalue_customname) = "MediaTypeTgz"]; + TAR = 2 [(gogoproto.enumvalue_customname) = "MediaTypeTar"]; + ZIP = 3 [(gogoproto.enumvalue_customname) = "MediaTypeZip"]; + } + + enum Type { + LayerTypeInvalid = 0; // not camel cased since it isn't supposed to be used by API users + SHARED = 1 [(gogoproto.enumvalue_customname) = "LayerTypeShared"]; + EXCLUSIVE = 2 [(gogoproto.enumvalue_customname) = "LayerTypeExclusive"]; + } + + string name = 1; + string url = 2 [(gogoproto.jsontag) = "url"]; + string destination_path = 3 [(gogoproto.jsontag) = "destination_path"]; + Type layer_type = 4 [(gogoproto.jsontag) = "layer_type"]; + MediaType media_type = 5 [(gogoproto.jsontag) = "media_type"]; + DigestAlgorithm digest_algorithm = 6; + string digest_value = 7; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/json.go b/vendor/code.cloudfoundry.org/bbs/models/json.go new file mode 100644 index 00000000..208d41bf --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/json.go @@ -0,0 +1,54 @@ +package models + +import ( + "encoding/json" + "reflect" +) + +func FromJSON(payload []byte, v Validator) error { + err := json.Unmarshal(payload, v) + if err != nil { + return err + } + return v.Validate() +} + +func ToJSON(v Validator) ([]byte, error) { + if isNil(v) { + return json.Marshal(v) + } + + if err := v.Validate(); err != nil { + return []byte{}, err + } + + return json.Marshal(v) +} + +func ToJSONArray(vs ...Validator) ([]byte, error) { + msgs := make([]*json.RawMessage, len(vs)) + + for i, v := range vs { + msg, err := ToJSON(v) + if err != nil { + return nil, err + } + + msgs[i] = (*json.RawMessage)(&msg) + } + + return json.Marshal(msgs) +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go new file mode 100644 index 00000000..0ccdf6db --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.pb.go @@ -0,0 +1,360 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: log_rate_limit.proto + +package models + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type LogRateLimit struct { + BytesPerSecond int64 `protobuf:"varint,1,opt,name=bytes_per_second,json=bytesPerSecond,proto3" json:"bytes_per_second,omitempty"` +} + +func (m *LogRateLimit) Reset() { *m = LogRateLimit{} } +func (*LogRateLimit) ProtoMessage() {} +func (*LogRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_bfeb7b5141d983ba, []int{0} +} +func (m *LogRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LogRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LogRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogRateLimit.Merge(m, src) +} +func (m *LogRateLimit) XXX_Size() int { + return m.Size() +} +func (m *LogRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_LogRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_LogRateLimit proto.InternalMessageInfo + +func (m *LogRateLimit) GetBytesPerSecond() int64 { + if m != nil { + return m.BytesPerSecond + } + return 0 +} + +func init() { + proto.RegisterType((*LogRateLimit)(nil), "models.LogRateLimit") +} + +func init() { proto.RegisterFile("log_rate_limit.proto", fileDescriptor_bfeb7b5141d983ba) } + +var fileDescriptor_bfeb7b5141d983ba = []byte{ + // 166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc9, 0xc9, 0x4f, 0x8f, + 0x2f, 0x4a, 0x2c, 0x49, 0x8d, 0xcf, 0xc9, 0xcc, 0xcd, 0x2c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x56, 0xb2, 0xe0, 0xe2, 0xf1, 0xc9, 0x4f, 0x0f, + 0x4a, 0x2c, 0x49, 0xf5, 0x01, 0xc9, 0x0a, 0x69, 0x70, 0x09, 0x24, 0x55, 0x96, 0xa4, 0x16, 0xc7, + 0x17, 0xa4, 0x16, 0xc5, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, + 0x07, 0xf1, 0x81, 0xc5, 0x03, 0x52, 0x8b, 0x82, 0xc1, 0xa2, 0x4e, 0x26, 0x17, 0x1e, 0xca, 0x31, + 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, + 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, + 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, + 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0xd6, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xba, 0xbc, + 0x7d, 0xdf, 0x96, 0x00, 0x00, 0x00, +} + +func (this *LogRateLimit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LogRateLimit) + if !ok { + that2, ok := that.(LogRateLimit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BytesPerSecond != that1.BytesPerSecond { + return false + } + return true +} +func (this *LogRateLimit) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.LogRateLimit{") + s = append(s, "BytesPerSecond: "+fmt.Sprintf("%#v", this.BytesPerSecond)+",\n") + s = 
append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringLogRateLimit(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *LogRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BytesPerSecond != 0 { + i = encodeVarintLogRateLimit(dAtA, i, uint64(m.BytesPerSecond)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintLogRateLimit(dAtA []byte, offset int, v uint64) int { + offset -= sovLogRateLimit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LogRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesPerSecond != 0 { + n += 1 + sovLogRateLimit(uint64(m.BytesPerSecond)) + } + return n +} + +func sovLogRateLimit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLogRateLimit(x uint64) (n int) { + return sovLogRateLimit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogRateLimit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogRateLimit{`, + `BytesPerSecond:` + fmt.Sprintf("%v", this.BytesPerSecond) + `,`, + `}`, + }, "") + return s +} +func valueToStringLogRateLimit(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesPerSecond", wireType) + } + m.BytesPerSecond = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesPerSecond |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogRateLimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLogRateLimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogRateLimit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogRateLimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLogRateLimit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLogRateLimit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLogRateLimit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLogRateLimit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogRateLimit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLogRateLimit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto new file mode 100644 index 00000000..7248103b --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/log_rate_limit.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package models; + +message LogRateLimit { + int64 bytes_per_second = 1; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go b/vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go new file mode 100644 index 00000000..fda65b52 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/lrp_convergence.go @@ -0,0 +1,6 @@ +package models + +type ActualLRPKeyWithSchedulingInfo struct { + Key *ActualLRPKey + SchedulingInfo *DesiredLRPSchedulingInfo +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/metric_tags.go b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.go new file mode 100644 index 00000000..2c1e246b --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.go @@ -0,0 +1,101 @@ +package models + +import ( + "encoding/json" + "fmt" + "strconv" +) + +func (m *MetricTagValue) Validate() error { + var validationError ValidationError + + if m.Static != "" && m.Dynamic.Valid() { + validationError = validationError.Append(ErrInvalidField{"static"}) + validationError = validationError.Append(ErrInvalidField{"dynamic"}) + } + + if m.Static == "" && !m.Dynamic.Valid() { + validationError = validationError.Append(ErrInvalidField{"static"}) + validationError = validationError.Append(ErrInvalidField{"dynamic"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (v MetricTagValue_DynamicValue) Valid() bool { + switch v { + case MetricTagDynamicValueIndex: + return true + case MetricTagDynamicValueInstanceGuid: + return true + default: + return false + } +} + +func ConvertMetricTags(metricTags map[string]*MetricTagValue, info map[MetricTagValue_DynamicValue]interface{}) (map[string]string, error) { + tags := 
make(map[string]string) + for k, v := range metricTags { + if v.Dynamic > 0 { + switch v.Dynamic { + case MetricTagDynamicValueIndex: + val, ok := info[MetricTagDynamicValueIndex].(int32) + if !ok { + return nil, fmt.Errorf("could not convert value %+v of type %T to int32", info[MetricTagDynamicValueIndex], info[MetricTagDynamicValueIndex]) + } + tags[k] = strconv.FormatInt(int64(val), 10) + case MetricTagDynamicValueInstanceGuid: + val, ok := info[MetricTagDynamicValueInstanceGuid].(string) + if !ok { + return nil, fmt.Errorf("could not convert value %+v of type %T to string", info[MetricTagDynamicValueInstanceGuid], info[MetricTagDynamicValueInstanceGuid]) + } + tags[k] = val + } + } else { + tags[k] = v.Static + } + } + return tags, nil +} + +func validateMetricTags(m map[string]*MetricTagValue, metricsGuid string) ValidationError { + var validationError ValidationError + + for _, v := range m { + err := v.Validate() + if err != nil { + validationError = validationError.Append(err) + } + } + + if len(m) > 0 && metricsGuid != "" { + if source_id, ok := m["source_id"]; !ok || source_id.Static != metricsGuid { + validationError = validationError.Append(ErrInvalidField{"source_id should match metrics_guid"}) + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (v *MetricTagValue_DynamicValue) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + *v = MetricTagValue_DynamicValue(MetricTagValue_DynamicValue_value[name]) + + return nil +} + +func (v MetricTagValue_DynamicValue) MarshalJSON() ([]byte, error) { + return json.Marshal(MetricTagValue_DynamicValue_name[int32(v)]) +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go new file mode 100644 index 00000000..1ab472f0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.pb.go @@ -0,0 +1,460 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: metric_tags.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricTagValue_DynamicValue int32 + +const ( + DynamicValueInvalid MetricTagValue_DynamicValue = 0 + MetricTagDynamicValueIndex MetricTagValue_DynamicValue = 1 + MetricTagDynamicValueInstanceGuid MetricTagValue_DynamicValue = 2 +) + +var MetricTagValue_DynamicValue_name = map[int32]string{ + 0: "DynamicValueInvalid", + 1: "INDEX", + 2: "INSTANCE_GUID", +} + +var MetricTagValue_DynamicValue_value = map[string]int32{ + "DynamicValueInvalid": 0, + "INDEX": 1, + "INSTANCE_GUID": 2, +} + +func (MetricTagValue_DynamicValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6fa2ee0541447d5e, []int{0, 0} +} + +type MetricTagValue struct { + // Note: we only expect one of the following set of fields to be + // set. 
+ Static string `protobuf:"bytes,1,opt,name=static,proto3" json:"static,omitempty"` + Dynamic MetricTagValue_DynamicValue `protobuf:"varint,2,opt,name=dynamic,proto3,enum=models.MetricTagValue_DynamicValue" json:"dynamic,omitempty"` +} + +func (m *MetricTagValue) Reset() { *m = MetricTagValue{} } +func (*MetricTagValue) ProtoMessage() {} +func (*MetricTagValue) Descriptor() ([]byte, []int) { + return fileDescriptor_6fa2ee0541447d5e, []int{0} +} +func (m *MetricTagValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTagValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricTagValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricTagValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTagValue.Merge(m, src) +} +func (m *MetricTagValue) XXX_Size() int { + return m.Size() +} +func (m *MetricTagValue) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTagValue.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricTagValue proto.InternalMessageInfo + +func (m *MetricTagValue) GetStatic() string { + if m != nil { + return m.Static + } + return "" +} + +func (m *MetricTagValue) GetDynamic() MetricTagValue_DynamicValue { + if m != nil { + return m.Dynamic + } + return DynamicValueInvalid +} + +func init() { + proto.RegisterEnum("models.MetricTagValue_DynamicValue", MetricTagValue_DynamicValue_name, MetricTagValue_DynamicValue_value) + proto.RegisterType((*MetricTagValue)(nil), "models.MetricTagValue") +} + +func init() { proto.RegisterFile("metric_tags.proto", fileDescriptor_6fa2ee0541447d5e) } + +var fileDescriptor_6fa2ee0541447d5e = []byte{ + // 296 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4d, 0x2d, 0x29, + 0xca, 0x4c, 0x8e, 0x2f, 0x49, 0x4c, 0x2f, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, + 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, + 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, + 0x98, 0x05, 0xd1, 0xa6, 0xf4, 0x8d, 0x91, 0x8b, 0xcf, 0x17, 0x6c, 0x58, 0x48, 0x62, 0x7a, 0x58, + 0x62, 0x4e, 0x69, 0xaa, 0x90, 0x18, 0x17, 0x5b, 0x71, 0x49, 0x62, 0x49, 0x66, 0xb2, 0x04, 0xa3, + 0x02, 0xa3, 0x06, 0x67, 0x10, 0x94, 0x27, 0x64, 0xcb, 0xc5, 0x9e, 0x52, 0x99, 0x97, 0x98, 0x9b, + 0x99, 0x2c, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x67, 0xa4, 0xac, 0x07, 0xb1, 0x53, 0x0f, 0xd5, 0x00, + 0x3d, 0x17, 0x88, 0x2a, 0x30, 0x27, 0x08, 0xa6, 0x47, 0xa9, 0x87, 0x91, 0x8b, 0x07, 0x59, 0x46, + 0x48, 0x9c, 0x4b, 0x18, 0x99, 0xef, 0x99, 0x57, 0x96, 0x98, 0x93, 0x99, 0x22, 0xc0, 0x20, 0xa4, + 0xc9, 0xc5, 0xea, 0xe9, 0xe7, 0xe2, 0x1a, 0x21, 0xc0, 0x28, 0x25, 0xd7, 0x35, 0x57, 0x41, 0x0a, + 0x6e, 0x3c, 0xaa, 0xf2, 0x94, 0xd4, 0x0a, 0x21, 0x0b, 0x2e, 0x5e, 0x4f, 0xbf, 0xe0, 0x10, 0x47, + 0x3f, 0x67, 0xd7, 0x78, 0xf7, 0x50, 0x4f, 0x17, 0x01, 0x26, 0x29, 0xd5, 0xae, 0xb9, 0x0a, 0x8a, + 0x38, 0xb4, 0x14, 0x97, 0x24, 0xe6, 0x25, 0xa7, 0xba, 0x97, 0x66, 0xa6, 0x38, 0x99, 0x5c, 0x78, + 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, + 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, + 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, + 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0x87, 
0x9a, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, + 0x9b, 0x83, 0x77, 0xc8, 0x81, 0x01, 0x00, 0x00, +} + +func (x MetricTagValue_DynamicValue) String() string { + s, ok := MetricTagValue_DynamicValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *MetricTagValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricTagValue) + if !ok { + that2, ok := that.(MetricTagValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Static != that1.Static { + return false + } + if this.Dynamic != that1.Dynamic { + return false + } + return true +} +func (this *MetricTagValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.MetricTagValue{") + s = append(s, "Static: "+fmt.Sprintf("%#v", this.Static)+",\n") + s = append(s, "Dynamic: "+fmt.Sprintf("%#v", this.Dynamic)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringMetricTags(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *MetricTagValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricTagValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTagValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Dynamic != 0 { + i = encodeVarintMetricTags(dAtA, i, uint64(m.Dynamic)) + i-- + dAtA[i] = 0x10 + } + if len(m.Static) > 0 { + i -= len(m.Static) + copy(dAtA[i:], m.Static) + i = encodeVarintMetricTags(dAtA, i, uint64(len(m.Static))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMetricTags(dAtA []byte, offset int, v uint64) int { + offset -= sovMetricTags(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MetricTagValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Static) + if l > 0 { + n += 1 + l + sovMetricTags(uint64(l)) + } + if m.Dynamic != 0 { + n += 1 + sovMetricTags(uint64(m.Dynamic)) + } + return n +} + +func sovMetricTags(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetricTags(x uint64) (n int) { + return sovMetricTags(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MetricTagValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricTagValue{`, + `Static:` + fmt.Sprintf("%v", this.Static) + `,`, + `Dynamic:` + fmt.Sprintf("%v", this.Dynamic) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetricTags(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MetricTagValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricTags + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricTagValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricTagValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Static", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricTags + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetricTags + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetricTags + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Static = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dynamic", wireType) + } + m.Dynamic = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetricTags + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dynamic |= MetricTagValue_DynamicValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetricTags(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetricTags + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetricTags(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricTags + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricTags + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetricTags + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetricTags + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetricTags + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetricTags + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetricTags = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetricTags = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetricTags = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto 
b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto new file mode 100644 index 00000000..18293e0f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/metric_tags.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message MetricTagValue { + enum DynamicValue { + DynamicValueInvalid = 0; + INDEX = 1 [(gogoproto.enumvalue_customname) = "MetricTagDynamicValueIndex"]; + INSTANCE_GUID = 2 [(gogoproto.enumvalue_customname) = "MetricTagDynamicValueInstanceGuid"]; + } + + // Note: we only expect one of the following set of fields to be + // set. + string static = 1; + DynamicValue dynamic = 2; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/models.go b/vendor/code.cloudfoundry.org/bbs/models/models.go new file mode 100644 index 00000000..5c8c93ab --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/models.go @@ -0,0 +1,8 @@ +package models + +//go:generate bash ../scripts/generate_protos.sh + +const ( + maximumAnnotationLength = 10 * 1024 + maximumRouteLength = 128 * 1024 +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/modification_tag.go b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.go new file mode 100644 index 00000000..d9d4aaa1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.go @@ -0,0 +1,20 @@ +package models + +func NewModificationTag(epoch string, index uint32) ModificationTag { + return ModificationTag{ + Epoch: epoch, + Index: index, + } +} + +func (t *ModificationTag) Increment() { + t.Index++ +} + +func (m *ModificationTag) SucceededBy(other *ModificationTag) bool { + if m == nil || m.Epoch == "" || other.Epoch == "" { + return true + } + + return m.Epoch != other.Epoch || m.Index < other.Index +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go new file mode 100644 index 00000000..e1fe576e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.pb.go @@ -0,0 +1,419 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: modification_tag.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
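
The vendored metric_tags.proto above notes that a MetricTagValue is expected to carry either a static literal value or a dynamic placeholder, never both. A minimal sketch of the two shapes, assuming the vendored code.cloudfoundry.org/bbs/models package is importable from this module; the tag value "my-app" is purely illustrative:

package main

import (
	"fmt"

	"code.cloudfoundry.org/bbs/models"
)

func main() {
	// Static tag: carries a literal value; Dynamic stays at its zero value.
	appName := &models.MetricTagValue{Static: "my-app"}

	// Dynamic tag: the value is filled in per instance (here: the instance index),
	// so Static stays empty.
	instanceIndex := &models.MetricTagValue{Dynamic: models.MetricTagDynamicValueIndex}

	fmt.Println(appName.GetStatic(), appName.GetDynamic() == models.DynamicValueInvalid)                // my-app true
	fmt.Println(instanceIndex.GetStatic() == "", instanceIndex.GetDynamic() == models.MetricTagDynamicValueIndex) // true true
}
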
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ModificationTag struct { + Epoch string `protobuf:"bytes,1,opt,name=epoch,proto3" json:"epoch"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index"` +} + +func (m *ModificationTag) Reset() { *m = ModificationTag{} } +func (*ModificationTag) ProtoMessage() {} +func (*ModificationTag) Descriptor() ([]byte, []int) { + return fileDescriptor_b84c9c806e96b4e3, []int{0} +} +func (m *ModificationTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ModificationTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ModificationTag.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ModificationTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModificationTag.Merge(m, src) +} +func (m *ModificationTag) XXX_Size() int { + return m.Size() +} +func (m *ModificationTag) XXX_DiscardUnknown() { + xxx_messageInfo_ModificationTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ModificationTag proto.InternalMessageInfo + +func (m *ModificationTag) GetEpoch() string { + if m != nil { + return m.Epoch + } + return "" +} + +func (m *ModificationTag) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func init() { + proto.RegisterType((*ModificationTag)(nil), "models.ModificationTag") +} + +func init() { proto.RegisterFile("modification_tag.proto", fileDescriptor_b84c9c806e96b4e3) } + +var fileDescriptor_b84c9c806e96b4e3 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcb, 0xcd, 0x4f, 0xc9, + 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x49, 0x4c, 0xd7, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, + 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x14, 0xcc, 0xc5, 0xef, 0x8b, 0x64, 0x60, + 0x48, 0x62, 0xba, 0x90, 0x3c, 0x17, 0x6b, 0x6a, 0x41, 0x7e, 0x72, 0x86, 0x04, 0xa3, 0x02, 0xa3, + 0x06, 0xa7, 0x13, 0xe7, 0xab, 0x7b, 0xf2, 0x10, 0x81, 0x20, 0x08, 0x05, 0x52, 0x90, 0x99, 0x97, + 0x92, 0x5a, 0x21, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x0b, 0x51, 0x00, 0x16, 0x08, 0x82, 0x50, 0x4e, + 0x26, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, + 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, + 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, + 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x2e, 0x32, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x12, 0xa6, 0xaa, 0xa3, 0xe2, 0x00, 0x00, 0x00, +} + +func (this *ModificationTag) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ModificationTag) + if !ok { + that2, ok := that.(ModificationTag) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Epoch != that1.Epoch { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *ModificationTag) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, 
"&models.ModificationTag{") + s = append(s, "Epoch: "+fmt.Sprintf("%#v", this.Epoch)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringModificationTag(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ModificationTag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ModificationTag) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ModificationTag) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintModificationTag(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.Epoch) > 0 { + i -= len(m.Epoch) + copy(dAtA[i:], m.Epoch) + i = encodeVarintModificationTag(dAtA, i, uint64(len(m.Epoch))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintModificationTag(dAtA []byte, offset int, v uint64) int { + offset -= sovModificationTag(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ModificationTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Epoch) + if l > 0 { + n += 1 + l + sovModificationTag(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovModificationTag(uint64(m.Index)) + } + return n +} + +func sovModificationTag(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozModificationTag(x uint64) (n int) { + return sovModificationTag(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ModificationTag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ModificationTag{`, + `Epoch:` + fmt.Sprintf("%v", this.Epoch) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func valueToStringModificationTag(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ModificationTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModificationTag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ModificationTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ModificationTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epoch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModificationTag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthModificationTag + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthModificationTag + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epoch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModificationTag + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipModificationTag(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthModificationTag + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipModificationTag(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowModificationTag + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthModificationTag + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupModificationTag + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthModificationTag + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthModificationTag = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowModificationTag = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupModificationTag = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto new file mode 100644 index 00000000..0b413b54 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/modification_tag.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message ModificationTag { + string epoch = 1 [(gogoproto.jsontag) = "epoch"]; + uint32 index = 2 [(gogoproto.jsontag) = "index"]; +} + diff --git a/vendor/code.cloudfoundry.org/bbs/models/network.pb.go b/vendor/code.cloudfoundry.org/bbs/models/network.pb.go new file mode 100644 index 00000000..6bd2eb70 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/network.pb.go @@ -0,0 +1,522 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
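
The hand-written modification_tag.go helpers above define how BBS decides whether one record supersedes another: SucceededBy reports true when either epoch is empty, when the epochs differ, or when the other tag has a higher index within the same epoch. A minimal sketch exercising that ordering, assuming the vendored models package is importable; epoch names are illustrative:

package main

import (
	"fmt"

	"code.cloudfoundry.org/bbs/models"
)

func main() {
	// Two tags in the same epoch: the one with the higher index supersedes the other.
	current := models.NewModificationTag("epoch-1", 1)
	updated := current
	updated.Increment() // index becomes 2

	fmt.Println(current.SucceededBy(&updated)) // true: same epoch, higher index
	fmt.Println(updated.SucceededBy(&current)) // false: same epoch, lower index

	// A tag from a different epoch always supersedes the current one.
	other := models.NewModificationTag("epoch-2", 0)
	fmt.Println(current.SucceededBy(&other)) // true: different epoch
}
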
+// source: network.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Network struct { + Properties map[string]string `protobuf:"bytes,1,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Network) Reset() { *m = Network{} } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { + return fileDescriptor_8571034d60397816, []int{0} +} +func (m *Network) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Network) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Network.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Network) XXX_Merge(src proto.Message) { + xxx_messageInfo_Network.Merge(m, src) +} +func (m *Network) XXX_Size() int { + return m.Size() +} +func (m *Network) XXX_DiscardUnknown() { + xxx_messageInfo_Network.DiscardUnknown(m) +} + +var xxx_messageInfo_Network proto.InternalMessageInfo + +func (m *Network) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func init() { + proto.RegisterType((*Network)(nil), "models.Network") + proto.RegisterMapType((map[string]string)(nil), "models.Network.PropertiesEntry") +} + +func init() { proto.RegisterFile("network.proto", fileDescriptor_8571034d60397816) } + +var fileDescriptor_8571034d60397816 = []byte{ + // 247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x4b, 0x2d, 0x29, + 0xcf, 0x2f, 0xca, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, + 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, + 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, + 0xb4, 0x98, 0x91, 0x8b, 0xdd, 0x0f, 0x62, 0x90, 0x50, 0x24, 0x17, 0x57, 0x41, 0x51, 0x7e, 0x41, + 0x6a, 0x51, 0x49, 0x66, 0x6a, 0xb1, 0x04, 0xa3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xbc, 0x1e, 0xc4, + 0x5c, 0x3d, 0xa8, 0x22, 0xbd, 0x00, 0xb8, 0x0a, 0xd7, 0xbc, 0x92, 0xa2, 0x4a, 0x27, 0x89, 0x57, + 0xf7, 0xe4, 0x45, 0x10, 0xda, 0x74, 0xf2, 0x73, 0x33, 0x4b, 0x52, 0x73, 0x0b, 0x4a, 0x2a, 0x83, + 0x90, 0x0c, 0x93, 0xb2, 0xe5, 0xe2, 0x47, 0xd3, 0x28, 0x24, 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, + 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, + 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, 0x46, 0x27, 0x93, 0x0b, 0x0f, 0xe5, + 0x18, 0x6f, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, + 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 
0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, + 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, + 0x8d, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x2f, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x38, + 0x3c, 0x9d, 0x4b, 0x2a, 0x01, 0x00, 0x00, +} + +func (this *Network) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Network) + if !ok { + that2, ok := that.(Network) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Properties) != len(that1.Properties) { + return false + } + for i := range this.Properties { + if this.Properties[i] != that1.Properties[i] { + return false + } + } + return true +} +func (this *Network) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.Network{") + keysForProperties := make([]string, 0, len(this.Properties)) + for k, _ := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]string{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%#v: %#v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + if this.Properties != nil { + s = append(s, "Properties: "+mapStringForProperties+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNetwork(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Network) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Network) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Network) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Properties) > 0 { + for k := range m.Properties { + v := m.Properties[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintNetwork(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintNetwork(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintNetwork(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintNetwork(dAtA []byte, offset int, v uint64) int { + offset -= sovNetwork(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Network) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovNetwork(uint64(len(k))) + 1 + len(v) + sovNetwork(uint64(len(v))) + n += mapEntrySize + 1 + sovNetwork(uint64(mapEntrySize)) + } + } + return n +} + +func sovNetwork(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNetwork(x uint64) (n int) { + return sovNetwork(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Network) String() string { + if this == nil { + return "nil" + } + keysForProperties := make([]string, 0, len(this.Properties)) + 
for k, _ := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]string{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + s := strings.Join([]string{`&Network{`, + `Properties:` + mapStringForProperties + `,`, + `}`, + }, "") + return s +} +func valueToStringNetwork(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Network) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Network: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNetwork + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNetwork + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNetwork + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNetwork + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetwork + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNetwork + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 
{ + return ErrInvalidLengthNetwork + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNetwork(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthNetwork + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetwork(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthNetwork + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNetwork(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetwork + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetwork + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetwork + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthNetwork + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupNetwork + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthNetwork + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthNetwork = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNetwork = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupNetwork = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/network.proto b/vendor/code.cloudfoundry.org/bbs/models/network.proto new file mode 100644 index 00000000..95185656 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/network.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message Network { + map properties = 1 [(gogoproto.jsontag) = "properties,omitempty"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/package.go b/vendor/code.cloudfoundry.org/bbs/models/package.go new file mode 100644 index 00000000..ab6aeab0 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/package.go @@ -0,0 +1 @@ +package models // import "code.cloudfoundry.org/bbs/models" diff --git a/vendor/code.cloudfoundry.org/bbs/models/ping.pb.go b/vendor/code.cloudfoundry.org/bbs/models/ping.pb.go new file mode 100644 index 00000000..6acb0b54 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/ping.pb.go @@ -0,0 +1,368 
@@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ping.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PingResponse struct { + Available bool `protobuf:"varint,1,opt,name=available,proto3" json:"available"` +} + +func (m *PingResponse) Reset() { *m = PingResponse{} } +func (*PingResponse) ProtoMessage() {} +func (*PingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6d51d96c3ad891f5, []int{0} +} +func (m *PingResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingResponse.Merge(m, src) +} +func (m *PingResponse) XXX_Size() int { + return m.Size() +} +func (m *PingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PingResponse proto.InternalMessageInfo + +func (m *PingResponse) GetAvailable() bool { + if m != nil { + return m.Available + } + return false +} + +func init() { + proto.RegisterType((*PingResponse)(nil), "models.PingResponse") +} + +func init() { proto.RegisterFile("ping.proto", fileDescriptor_6d51d96c3ad891f5) } + +var fileDescriptor_6d51d96c3ad891f5 = []byte{ + // 181 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b, + 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, 0x29, 0x96, 0xd2, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, + 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x64, 0xcd, 0xc5, + 0x13, 0x90, 0x99, 0x97, 0x1e, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0xa4, 0xcd, 0xc5, + 0x99, 0x58, 0x96, 0x98, 0x99, 0x93, 0x98, 0x94, 0x93, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe1, + 0xc4, 0xfb, 0xea, 0x9e, 0x3c, 0x42, 0x30, 0x08, 0xc1, 0x74, 0x32, 0xb9, 0xf0, 0x50, 0x8e, 0xe1, + 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, + 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, + 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, + 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0xb6, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x41, 0x03, + 0x47, 0xbe, 0x00, 0x00, 0x00, +} + +func (this *PingResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PingResponse) + if !ok { + that2, ok := that.(PingResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + 
return this == nil + } else if this == nil { + return false + } + if this.Available != that1.Available { + return false + } + return true +} +func (this *PingResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.PingResponse{") + s = append(s, "Available: "+fmt.Sprintf("%#v", this.Available)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringPing(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Available { + i-- + if m.Available { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPing(dAtA []byte, offset int, v uint64) int { + offset -= sovPing(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Available { + n += 2 + } + return n +} + +func sovPing(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPing(x uint64) (n int) { + return sovPing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PingResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PingResponse{`, + `Available:` + fmt.Sprintf("%v", this.Available) + `,`, + `}`, + }, "") + return s +} +func valueToStringPing(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PingResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Available", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Available = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPing(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPing + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy 
+ } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPing(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPing + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPing + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPing + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPing = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPing = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPing = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/ping.proto b/vendor/code.cloudfoundry.org/bbs/models/ping.proto new file mode 100644 index 00000000..2841864f --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/ping.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message PingResponse { + bool available = 1 [(gogoproto.jsontag) = "available"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go b/vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go new file mode 100644 index 00000000..1246dbcc --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/restart_calculator.go @@ -0,0 +1,85 @@ +package models + +import ( + "fmt" + "math" + "time" +) + +const DefaultImmediateRestarts = 3 +const DefaultMaxBackoffDuration = 16 * time.Minute +const DefaultMaxRestarts = 200 + +const CrashBackoffMinDuration = 30 * time.Second + +func exponentialBackoff(exponent, max int32) time.Duration { + if exponent > max { + exponent = max + } + return CrashBackoffMinDuration * time.Duration(powerOfTwo(exponent)) +} + +func powerOfTwo(pow int32) int32 { + if pow < 0 { + panic("pow cannot be negative") + } + return 1 << uint(pow) +} + +func calculateMaxBackoffCount(maxDuration time.Duration) int32 { + total := math.Ceil(float64(maxDuration) / float64(CrashBackoffMinDuration)) + return int32(math.Logb(total)) +} + +type RestartCalculator struct { + ImmediateRestarts int32 `json:"immediate_restarts"` + MaxBackoffCount int32 `json:"max_backoff_count"` + MaxBackoffDuration time.Duration `json:"max_backoff_duration"` + MaxRestartAttempts int32 `json:"max_restart_attempts"` +} + +func NewDefaultRestartCalculator() RestartCalculator { + return NewRestartCalculator(DefaultImmediateRestarts, DefaultMaxBackoffDuration, DefaultMaxRestarts) +} + +func 
NewRestartCalculator(immediateRestarts int32, maxBackoffDuration time.Duration, maxRestarts int32) RestartCalculator { + return RestartCalculator{ + ImmediateRestarts: immediateRestarts, + MaxBackoffDuration: maxBackoffDuration, + MaxBackoffCount: calculateMaxBackoffCount(maxBackoffDuration), + MaxRestartAttempts: maxRestarts, + } +} + +func (r RestartCalculator) Validate() error { + var validationError ValidationError + if r.MaxBackoffDuration < CrashBackoffMinDuration { + err := fmt.Errorf("MaxBackoffDuration '%s' must be larger than CrashBackoffMinDuration '%s'", r.MaxBackoffDuration, CrashBackoffMinDuration) + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (r RestartCalculator) ShouldRestart(now, crashedAt int64, crashCount int32) bool { + switch { + case crashCount < r.ImmediateRestarts: + return true + + case crashCount < r.MaxRestartAttempts: + backoffDuration := exponentialBackoff(crashCount-r.ImmediateRestarts, r.MaxBackoffCount) + if backoffDuration > r.MaxBackoffDuration { + backoffDuration = r.MaxBackoffDuration + } + nextRestartTime := crashedAt + backoffDuration.Nanoseconds() + if nextRestartTime <= now { + return true + } + } + + return false +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/routes.go b/vendor/code.cloudfoundry.org/bbs/models/routes.go new file mode 100644 index 00000000..6097fe12 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/routes.go @@ -0,0 +1,77 @@ +package models + +import ( + "bytes" + "encoding/json" +) + +type Routes map[string]*json.RawMessage + +func (r *Routes) protoRoutes() *ProtoRoutes { + pr := &ProtoRoutes{ + Routes: map[string][]byte{}, + } + + for k, v := range *r { + pr.Routes[k] = *v + } + + return pr +} + +func (r *Routes) Marshal() ([]byte, error) { + return r.protoRoutes().Marshal() +} + +func (r *Routes) MarshalTo(data []byte) (n int, err error) { + return r.protoRoutes().MarshalTo(data) +} + +func (r *Routes) Unmarshal(data []byte) error { + pr := &ProtoRoutes{} + err := pr.Unmarshal(data) + if err != nil { + return err + } + + if pr.Routes == nil { + return nil + } + + routes := map[string]*json.RawMessage{} + for k, v := range pr.Routes { + raw := json.RawMessage(v) + routes[k] = &raw + } + *r = routes + + return nil +} + +func (r *Routes) Size() int { + if r == nil { + return 0 + } + + return r.protoRoutes().Size() +} + +func (r *Routes) Equal(other Routes) bool { + for k, v := range *r { + if !bytes.Equal(*v, *other[k]) { + return false + } + } + return true +} + +func (r Routes) Validate() error { + totalRoutesLength := 0 + for _, value := range r { + totalRoutesLength += len(*value) + if totalRoutesLength > maximumRouteLength { + return ErrInvalidField{"routes"} + } + } + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go b/vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go new file mode 100644 index 00000000..87b40e03 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/security_group.pb.go @@ -0,0 +1,1282 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: security_group.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
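
The vendored restart_calculator.go above encodes the crash-restart policy: the first ImmediateRestarts crashes may restart at once; after that a crash must wait CrashBackoffMinDuration * 2^(crashCount - ImmediateRestarts), capped at MaxBackoffDuration; once crashCount reaches MaxRestartAttempts no restart is allowed. A minimal sketch, assuming the vendored models package is importable; the local backoff closure re-derives the unexported formula purely for illustration:

package main

import (
	"fmt"
	"time"

	"code.cloudfoundry.org/bbs/models"
)

func main() {
	calc := models.NewDefaultRestartCalculator()

	// Mirror of the vendored backoff arithmetic: 30s * 2^(crashCount - ImmediateRestarts),
	// exponent capped at MaxBackoffCount and the result capped at MaxBackoffDuration.
	backoff := func(crashCount int32) time.Duration {
		exp := crashCount - calc.ImmediateRestarts
		if exp < 0 {
			return 0 // below ImmediateRestarts the instance restarts right away
		}
		if exp > calc.MaxBackoffCount {
			exp = calc.MaxBackoffCount
		}
		d := models.CrashBackoffMinDuration * time.Duration(1<<uint(exp))
		if d > calc.MaxBackoffDuration {
			d = calc.MaxBackoffDuration
		}
		return d
	}

	crashedAt := time.Now().UnixNano()
	for _, n := range []int32{0, 3, 5, 8, 200} {
		// ShouldRestart is queried at the moment of the crash, so it is only true
		// while the crash count is still within the immediate-restart budget.
		fmt.Printf("crash #%d: wait %s, restart immediately: %v\n",
			n, backoff(n), calc.ShouldRestart(crashedAt, crashedAt, n))
	}
}
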
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PortRange struct { + Start uint32 `protobuf:"varint,1,opt,name=start,proto3" json:"start"` + End uint32 `protobuf:"varint,2,opt,name=end,proto3" json:"end"` +} + +func (m *PortRange) Reset() { *m = PortRange{} } +func (*PortRange) ProtoMessage() {} +func (*PortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_ff465b8f55f128fd, []int{0} +} +func (m *PortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortRange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortRange.Merge(m, src) +} +func (m *PortRange) XXX_Size() int { + return m.Size() +} +func (m *PortRange) XXX_DiscardUnknown() { + xxx_messageInfo_PortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_PortRange proto.InternalMessageInfo + +func (m *PortRange) GetStart() uint32 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *PortRange) GetEnd() uint32 { + if m != nil { + return m.End + } + return 0 +} + +type ICMPInfo struct { + Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type"` + Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code"` +} + +func (m *ICMPInfo) Reset() { *m = ICMPInfo{} } +func (*ICMPInfo) ProtoMessage() {} +func (*ICMPInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ff465b8f55f128fd, []int{1} +} +func (m *ICMPInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ICMPInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ICMPInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ICMPInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ICMPInfo.Merge(m, src) +} +func (m *ICMPInfo) XXX_Size() int { + return m.Size() +} +func (m *ICMPInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ICMPInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ICMPInfo proto.InternalMessageInfo + +func (m *ICMPInfo) GetType() int32 { + if m != nil { + return m.Type + } + return 0 +} + +func (m *ICMPInfo) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +type SecurityGroupRule struct { + Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"` + Destinations []string `protobuf:"bytes,2,rep,name=destinations,proto3" json:"destinations,omitempty"` + Ports []uint32 `protobuf:"varint,3,rep,name=ports,proto3" json:"ports,omitempty"` + PortRange *PortRange `protobuf:"bytes,4,opt,name=port_range,json=portRange,proto3" json:"port_range,omitempty"` + IcmpInfo *ICMPInfo `protobuf:"bytes,5,opt,name=icmp_info,json=icmpInfo,proto3" json:"icmp_info,omitempty"` + Log bool `protobuf:"varint,6,opt,name=log,proto3" json:"log"` + Annotations []string `protobuf:"bytes,7,rep,name=annotations,proto3" json:"annotations,omitempty"` +} + +func (m 
*SecurityGroupRule) Reset() { *m = SecurityGroupRule{} } +func (*SecurityGroupRule) ProtoMessage() {} +func (*SecurityGroupRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ff465b8f55f128fd, []int{2} +} +func (m *SecurityGroupRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityGroupRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SecurityGroupRule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SecurityGroupRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityGroupRule.Merge(m, src) +} +func (m *SecurityGroupRule) XXX_Size() int { + return m.Size() +} +func (m *SecurityGroupRule) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityGroupRule.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityGroupRule proto.InternalMessageInfo + +func (m *SecurityGroupRule) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func (m *SecurityGroupRule) GetDestinations() []string { + if m != nil { + return m.Destinations + } + return nil +} + +func (m *SecurityGroupRule) GetPorts() []uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *SecurityGroupRule) GetPortRange() *PortRange { + if m != nil { + return m.PortRange + } + return nil +} + +func (m *SecurityGroupRule) GetIcmpInfo() *ICMPInfo { + if m != nil { + return m.IcmpInfo + } + return nil +} + +func (m *SecurityGroupRule) GetLog() bool { + if m != nil { + return m.Log + } + return false +} + +func (m *SecurityGroupRule) GetAnnotations() []string { + if m != nil { + return m.Annotations + } + return nil +} + +func init() { + proto.RegisterType((*PortRange)(nil), "models.PortRange") + proto.RegisterType((*ICMPInfo)(nil), "models.ICMPInfo") + proto.RegisterType((*SecurityGroupRule)(nil), "models.SecurityGroupRule") +} + +func init() { proto.RegisterFile("security_group.proto", fileDescriptor_ff465b8f55f128fd) } + +var fileDescriptor_ff465b8f55f128fd = []byte{ + // 402 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xb1, 0x6e, 0xdb, 0x30, + 0x10, 0x86, 0x45, 0x2b, 0x76, 0x24, 0xa6, 0x01, 0x12, 0xa2, 0x03, 0x13, 0x14, 0x94, 0xa0, 0x49, + 0x4b, 0x94, 0xa2, 0xed, 0x13, 0xa8, 0x40, 0x83, 0x0c, 0x05, 0x02, 0xf6, 0x01, 0x0c, 0x59, 0xa2, + 0x55, 0x01, 0x32, 0x4f, 0x90, 0xa8, 0x21, 0x5b, 0xf7, 0x2e, 0x7d, 0x8c, 0x3e, 0x4a, 0x47, 0x8f, + 0x99, 0x8c, 0x5a, 0x5e, 0x0a, 0x4f, 0x79, 0x84, 0x82, 0x27, 0xc7, 0x68, 0x97, 0x13, 0xff, 0xff, + 0xd7, 0x1d, 0xee, 0x3e, 0xfa, 0xba, 0x53, 0x79, 0xdf, 0x56, 0xe6, 0x71, 0x5e, 0xb6, 0xd0, 0x37, + 0x49, 0xd3, 0x82, 0x01, 0x36, 0x5b, 0x41, 0xa1, 0xea, 0xee, 0xfa, 0xa6, 0xac, 0xcc, 0xd7, 0x7e, + 0x91, 0xe4, 0xb0, 0xba, 0x2d, 0xa1, 0x84, 0x5b, 0x8c, 0x17, 0xfd, 0x12, 0x15, 0x0a, 0x7c, 0x8d, + 0x6d, 0xd1, 0x1d, 0xf5, 0x1f, 0xa0, 0x35, 0x32, 0xd3, 0xa5, 0x62, 0x01, 0x9d, 0x76, 0x26, 0x6b, + 0x0d, 0x27, 0x21, 0x89, 0xcf, 0x53, 0x7f, 0xbf, 0x09, 0x46, 0x43, 0x8e, 0x1f, 0x76, 0x45, 0x5d, + 0xa5, 0x0b, 0x3e, 0xc1, 0xf8, 0x74, 0xbf, 0x09, 0xac, 0x94, 0xb6, 0x44, 0x9f, 0xa8, 0x77, 0xff, + 0xf1, 0xf3, 0xc3, 0xbd, 0x5e, 0x02, 0x7b, 0x43, 0x4f, 0xcc, 0x63, 0xa3, 0x70, 0xcc, 0x34, 0xf5, + 0xf6, 0x9b, 0x00, 0xb5, 0xc4, 0x6a, 0xd3, 0x1c, 0x0a, 0x85, 0x53, 0x0e, 0xa9, 0xd5, 0x12, 0x6b, + 0xf4, 0x7d, 0x42, 0x2f, 0xbf, 0x1c, 0x0e, 0xbc, 0xb3, 0xf7, 0xc9, 0xbe, 0x56, 0xec, 0x9a, 0x7a, + 0xb8, 0x6f, 0x0e, 
0x35, 0x4e, 0xf5, 0xe5, 0x51, 0xb3, 0x88, 0xbe, 0x2a, 0x54, 0x67, 0x2a, 0x9d, + 0x99, 0x0a, 0x74, 0xc7, 0x27, 0xa1, 0x1b, 0xfb, 0xf2, 0x3f, 0x8f, 0x71, 0x3a, 0x6d, 0xa0, 0x35, + 0x1d, 0x77, 0x43, 0x37, 0x3e, 0x4f, 0x27, 0x17, 0x8e, 0x1c, 0x0d, 0xf6, 0x96, 0x52, 0xfb, 0x98, + 0xb7, 0x96, 0x00, 0x3f, 0x09, 0x49, 0x7c, 0xf6, 0xee, 0x32, 0x19, 0x61, 0x26, 0x47, 0x34, 0xd2, + 0x6f, 0x8e, 0x94, 0x6e, 0xa8, 0x5f, 0xe5, 0xab, 0x66, 0x5e, 0xe9, 0x25, 0xf0, 0x29, 0x36, 0x5c, + 0xbc, 0x34, 0xbc, 0x20, 0x90, 0x9e, 0xfd, 0x05, 0x61, 0x5c, 0x51, 0xb7, 0x86, 0x92, 0xcf, 0x42, + 0x12, 0x7b, 0x23, 0xb3, 0x1a, 0x4a, 0x69, 0x0b, 0x0b, 0xe9, 0x59, 0xa6, 0x35, 0x98, 0xc3, 0xe2, + 0xa7, 0xb8, 0xf8, 0xbf, 0x56, 0xfa, 0x61, 0xbd, 0x15, 0xce, 0xd3, 0x56, 0x38, 0xcf, 0x5b, 0x41, + 0xbe, 0x0d, 0x82, 0xfc, 0x1c, 0x04, 0xf9, 0x35, 0x08, 0xb2, 0x1e, 0x04, 0xf9, 0x3d, 0x08, 0xf2, + 0x67, 0x10, 0xce, 0xf3, 0x20, 0xc8, 0x8f, 0x9d, 0x70, 0xd6, 0x3b, 0xe1, 0x3c, 0xed, 0x84, 0xb3, + 0x98, 0x21, 0x9b, 0xf7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xd0, 0x92, 0x85, 0x2a, 0x02, + 0x00, 0x00, +} + +func (this *PortRange) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PortRange) + if !ok { + that2, ok := that.(PortRange) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Start != that1.Start { + return false + } + if this.End != that1.End { + return false + } + return true +} +func (this *ICMPInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ICMPInfo) + if !ok { + that2, ok := that.(ICMPInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Code != that1.Code { + return false + } + return true +} +func (this *SecurityGroupRule) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SecurityGroupRule) + if !ok { + that2, ok := that.(SecurityGroupRule) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Protocol != that1.Protocol { + return false + } + if len(this.Destinations) != len(that1.Destinations) { + return false + } + for i := range this.Destinations { + if this.Destinations[i] != that1.Destinations[i] { + return false + } + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if this.Ports[i] != that1.Ports[i] { + return false + } + } + if !this.PortRange.Equal(that1.PortRange) { + return false + } + if !this.IcmpInfo.Equal(that1.IcmpInfo) { + return false + } + if this.Log != that1.Log { + return false + } + if len(this.Annotations) != len(that1.Annotations) { + return false + } + for i := range this.Annotations { + if this.Annotations[i] != that1.Annotations[i] { + return false + } + } + return true +} +func (this *PortRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.PortRange{") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ICMPInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.ICMPInfo{") + 
s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SecurityGroupRule) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&models.SecurityGroupRule{") + s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n") + s = append(s, "Destinations: "+fmt.Sprintf("%#v", this.Destinations)+",\n") + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + if this.PortRange != nil { + s = append(s, "PortRange: "+fmt.Sprintf("%#v", this.PortRange)+",\n") + } + if this.IcmpInfo != nil { + s = append(s, "IcmpInfo: "+fmt.Sprintf("%#v", this.IcmpInfo)+",\n") + } + s = append(s, "Log: "+fmt.Sprintf("%#v", this.Log)+",\n") + s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSecurityGroup(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *PortRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.End != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ICMPInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ICMPInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ICMPInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Code != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SecurityGroupRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityGroupRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityGroupRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Annotations[iNdEx]) + copy(dAtA[i:], m.Annotations[iNdEx]) + i = encodeVarintSecurityGroup(dAtA, i, uint64(len(m.Annotations[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.Log { + i-- + if m.Log { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.IcmpInfo != nil { + { + size, err := m.IcmpInfo.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintSecurityGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.PortRange != nil { + { + size, err := m.PortRange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSecurityGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintSecurityGroup(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + if len(m.Destinations) > 0 { + for iNdEx := len(m.Destinations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Destinations[iNdEx]) + copy(dAtA[i:], m.Destinations[iNdEx]) + i = encodeVarintSecurityGroup(dAtA, i, uint64(len(m.Destinations[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Protocol) > 0 { + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintSecurityGroup(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSecurityGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovSecurityGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PortRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sovSecurityGroup(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovSecurityGroup(uint64(m.End)) + } + return n +} + +func (m *ICMPInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSecurityGroup(uint64(m.Type)) + } + if m.Code != 0 { + n += 1 + sovSecurityGroup(uint64(m.Code)) + } + return n +} + +func (m *SecurityGroupRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Protocol) + if l > 0 { + n += 1 + l + sovSecurityGroup(uint64(l)) + } + if len(m.Destinations) > 0 { + for _, s := range m.Destinations { + l = len(s) + n += 1 + l + sovSecurityGroup(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovSecurityGroup(uint64(e)) + } + } + if m.PortRange != nil { + l = m.PortRange.Size() + n += 1 + l + sovSecurityGroup(uint64(l)) + } + if m.IcmpInfo != nil { + l = m.IcmpInfo.Size() + n += 1 + l + sovSecurityGroup(uint64(l)) + } + if m.Log { + n += 2 + } + if len(m.Annotations) > 0 { + for _, s := range m.Annotations { + l = len(s) + n += 1 + l + sovSecurityGroup(uint64(l)) + } + } + return n +} + +func sovSecurityGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSecurityGroup(x uint64) (n int) { + return sovSecurityGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortRange{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `}`, + }, "") + return s +} +func (this *ICMPInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ICMPInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityGroupRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityGroupRule{`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Destinations:` + fmt.Sprintf("%v", this.Destinations) + `,`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `PortRange:` + 
strings.Replace(this.PortRange.String(), "PortRange", "PortRange", 1) + `,`, + `IcmpInfo:` + strings.Replace(this.IcmpInfo.String(), "ICMPInfo", "ICMPInfo", 1) + `,`, + `Log:` + fmt.Sprintf("%v", this.Log) + `,`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `}`, + }, "") + return s +} +func valueToStringSecurityGroup(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PortRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSecurityGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSecurityGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ICMPInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ICMPInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ICMPInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSecurityGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSecurityGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityGroupRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityGroupRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityGroupRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Destinations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Destinations = append(m.Destinations, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer 
< 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortRange == nil { + m.PortRange = &PortRange{} + } + if err := m.PortRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IcmpInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IcmpInfo == nil { + m.IcmpInfo = &ICMPInfo{} + } + if err := m.IcmpInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Log = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecurityGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecurityGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotations = append(m.Annotations, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSecurityGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSecurityGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSecurityGroup(dAtA 
[]byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSecurityGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSecurityGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSecurityGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSecurityGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSecurityGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSecurityGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSecurityGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/security_group.proto b/vendor/code.cloudfoundry.org/bbs/models/security_group.proto new file mode 100644 index 00000000..ad28c29c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/security_group.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message PortRange { + uint32 start = 1 [(gogoproto.jsontag) = "start"]; + uint32 end = 2 [(gogoproto.jsontag) = "end"]; +} + +message ICMPInfo { + int32 type = 1 [(gogoproto.jsontag) = "type"]; + int32 code = 2 [(gogoproto.jsontag) = "code"]; +} + +message SecurityGroupRule { + string protocol = 1; + repeated string destinations = 2; + repeated uint32 ports = 3 [packed = false]; + PortRange port_range = 4; + ICMPInfo icmp_info = 5; + bool log = 6 [(gogoproto.jsontag) = "log"]; + repeated string annotations = 7; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/security_groups.go b/vendor/code.cloudfoundry.org/bbs/models/security_groups.go new file mode 100644 index 00000000..8ea22619 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/security_groups.go @@ -0,0 +1,157 @@ +package models + +import ( + "errors" + "net" + "strings" +) + +const ( + TCPProtocol = "tcp" + UDPProtocol = "udp" + ICMPProtocol = "icmp" + AllProtocol = "all" +) + +var errInvalidIP = errors.New("Invalid IP") + +func (rule SecurityGroupRule) Validate() error { + var validationError ValidationError + + switch rule.GetProtocol() { + case TCPProtocol: + validationError = rule.validatePorts() + if rule.IcmpInfo != nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + case UDPProtocol: + validationError = rule.validatePorts() + if rule.IcmpInfo != nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + case ICMPProtocol: + if rule.PortRange != nil { + validationError = 
validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.Ports != nil { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + if rule.IcmpInfo == nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + case AllProtocol: + if rule.PortRange != nil { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.Ports != nil { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + if rule.IcmpInfo != nil { + validationError = validationError.Append(ErrInvalidField{"icmp_info"}) + } + default: + validationError = validationError.Append(ErrInvalidField{"protocol"}) + } + + if err := rule.validateDestinations(); err != nil { + validationError = validationError.Append(ErrInvalidField{"destinations [ " + err.Error() + " ]"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (rule SecurityGroupRule) validatePorts() ValidationError { + var validationError ValidationError + + if rule.PortRange == nil && rule.Ports == nil { + return validationError.Append(errors.New("Missing required field: ports or port_range")) + } + + if rule.PortRange != nil && rule.Ports != nil { + return validationError.Append(errors.New("Invalid: ports and port_range provided")) + } + + if rule.PortRange != nil { + if rule.GetPortRange().GetStart() < 1 { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.GetPortRange().GetEnd() < 1 { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + if rule.GetPortRange().GetStart() > rule.GetPortRange().GetEnd() { + validationError = validationError.Append(ErrInvalidField{"port_range"}) + } + } + + if rule.Ports != nil { + if len(rule.Ports) == 0 { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + + for _, p := range rule.Ports { + if p < 1 { + validationError = validationError.Append(ErrInvalidField{"ports"}) + } + } + } + + return validationError +} + +func (rule SecurityGroupRule) validateDestinations() error { + if len(rule.Destinations) == 0 { + return errors.New("Must have at least 1 destination") + } + + var validationError ValidationError + + var destinations []string + for _, d := range rule.Destinations { + destinations = append(destinations, strings.Split(d, ",")...) 
+ } + + for _, d := range destinations { + n := strings.IndexAny(d, "-/") + if n == -1 { + if net.ParseIP(d) == nil { + validationError = validationError.Append(errInvalidIP) + continue + } + } else if d[n] == '/' { + _, _, err := net.ParseCIDR(d) + if err != nil { + validationError = validationError.Append(err) + continue + } + } else { + firstIP := net.ParseIP(d[:n]) + secondIP := net.ParseIP(d[n+1:]) + if firstIP == nil || secondIP == nil { + validationError = validationError.Append(errInvalidIP) + continue + } + for i, b := range firstIP { + if b < secondIP[i] { + break + } + + if b == secondIP[i] { + continue + } + + validationError = validationError.Append(errInvalidIP) + continue + } + } + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/sidecar.go b/vendor/code.cloudfoundry.org/bbs/models/sidecar.go new file mode 100644 index 00000000..4f8ce3ab --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/sidecar.go @@ -0,0 +1,32 @@ +package models + +func (s Sidecar) Validate() error { + var validationError ValidationError + + if s.Action == nil { + validationError = validationError.Append(ErrInvalidActionType) + } else if err := s.Action.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + validationError = validationError.Append(err) + } + + if s.GetMemoryMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if s.GetDiskMb() < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + return validationError +} + +func validateSidecars(sidecars []*Sidecar) ValidationError { + var validationError ValidationError + + for _, s := range sidecars { + validationError = validationError.Check(s) + } + + return validationError +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go b/vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go new file mode 100644 index 00000000..3a9f6ec1 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/sidecar.pb.go @@ -0,0 +1,472 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: sidecar.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Sidecar struct { + Action *Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + DiskMb int32 `protobuf:"varint,2,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + MemoryMb int32 `protobuf:"varint,3,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` +} + +func (m *Sidecar) Reset() { *m = Sidecar{} } +func (*Sidecar) ProtoMessage() {} +func (*Sidecar) Descriptor() ([]byte, []int) { + return fileDescriptor_179ad3b13e6397ec, []int{0} +} +func (m *Sidecar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sidecar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sidecar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sidecar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sidecar.Merge(m, src) +} +func (m *Sidecar) XXX_Size() int { + return m.Size() +} +func (m *Sidecar) XXX_DiscardUnknown() { + xxx_messageInfo_Sidecar.DiscardUnknown(m) +} + +var xxx_messageInfo_Sidecar proto.InternalMessageInfo + +func (m *Sidecar) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *Sidecar) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *Sidecar) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func init() { + proto.RegisterType((*Sidecar)(nil), "models.Sidecar") +} + +func init() { proto.RegisterFile("sidecar.proto", fileDescriptor_179ad3b13e6397ec) } + +var fileDescriptor_179ad3b13e6397ec = []byte{ + // 239 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0xce, 0x4c, 0x49, + 0x4d, 0x4e, 0x2c, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcb, 0xcd, 0x4f, 0x49, 0xcd, + 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, + 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0x26, + 0xc5, 0x9b, 0x98, 0x5c, 0x92, 0x99, 0x9f, 0x57, 0x0c, 0xe1, 0x2a, 0x35, 0x33, 0x72, 0xb1, 0x07, + 0x43, 0xcc, 0x15, 0x52, 0xe3, 0x62, 0x83, 0x48, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0xf1, + 0xe9, 0x41, 0xac, 0xd0, 0x73, 0x04, 0x8b, 0x06, 0x41, 0x65, 0x85, 0x54, 0xb8, 0xd8, 0x53, 0x32, + 0x8b, 0xb3, 0xe3, 0x73, 0x93, 0x24, 0x98, 0x14, 0x18, 0x35, 0x58, 0x9d, 0xb8, 0x5f, 0xdd, 0x93, + 0x87, 0x09, 0x05, 0xb1, 0x81, 0x18, 0xbe, 0x49, 0x42, 0x5a, 0x5c, 0x9c, 0xb9, 0xa9, 0xb9, 0xf9, + 0x45, 0x95, 0x20, 0x75, 0xcc, 0x60, 0x75, 0xbc, 0xaf, 0xee, 0xc9, 0x23, 0x04, 0x83, 0x38, 0x20, + 0x4c, 0xdf, 0x24, 0x27, 0x93, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, + 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, + 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, + 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x2f, + 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x28, 0x34, 0x4f, 0x19, 0x01, 0x00, 0x00, +} + +func (this *Sidecar) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sidecar) + if !ok { + that2, ok := that.(Sidecar) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + 
return false + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + return true +} +func (this *Sidecar) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.Sidecar{") + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSidecar(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Sidecar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sidecar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sidecar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MemoryMb != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x18 + } + if m.DiskMb != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x10 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSidecar(dAtA []byte, offset int, v uint64) int { + offset -= sovSidecar(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Sidecar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + if m.DiskMb != 0 { + n += 1 + sovSidecar(uint64(m.DiskMb)) + } + if m.MemoryMb != 0 { + n += 1 + sovSidecar(uint64(m.MemoryMb)) + } + return n +} + +func sovSidecar(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSidecar(x uint64) (n int) { + return sovSidecar(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Sidecar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sidecar{`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `}`, + }, "") + return s +} +func valueToStringSidecar(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Sidecar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: Sidecar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sidecar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSidecar(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSidecar + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSidecar + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSidecar + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSidecar = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSidecar = fmt.Errorf("proto: integer overflow") + 
ErrUnexpectedEndOfGroupSidecar = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/sidecar.proto b/vendor/code.cloudfoundry.org/bbs/models/sidecar.proto new file mode 100644 index 00000000..baa13bcb --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/sidecar.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actions.proto"; + +message Sidecar { + Action action = 1; + + int32 disk_mb = 2 [(gogoproto.jsontag) = "disk_mb"]; + int32 memory_mb = 3 [(gogoproto.jsontag) = "memory_mb"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/task.go b/vendor/code.cloudfoundry.org/bbs/models/task.go new file mode 100644 index 00000000..2319fe99 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task.go @@ -0,0 +1,206 @@ +package models + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "code.cloudfoundry.org/bbs/format" + "code.cloudfoundry.org/lager/v3" +) + +var taskGuidPattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +type TaskChange struct { + Before *Task + After *Task +} + +type TaskFilter struct { + Domain string + CellID string +} + +func (t *Task) LagerData() lager.Data { + return lager.Data{ + "task_guid": t.TaskGuid, + "domain": t.Domain, + "state": t.State, + "cell_id": t.CellId, + } +} + +func (task *Task) Validate() error { + var validationError ValidationError + + if task.Domain == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if !taskGuidPattern.MatchString(task.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if task.TaskDefinition == nil { + validationError = validationError.Append(ErrInvalidField{"task_definition"}) + } else if defErr := task.TaskDefinition.Validate(); defErr != nil { + validationError = validationError.Append(defErr) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (t *Task) Copy() *Task { + newTask := *t + newTask.TaskDefinition = t.TaskDefinition.Copy() + return &newTask +} + +func (t *Task) ValidateTransitionTo(to Task_State) error { + var valid bool + from := t.State + switch to { + case Task_Running: + valid = from == Task_Pending + case Task_Completed: + valid = from == Task_Running + case Task_Resolving: + valid = from == Task_Completed + } + + if !valid { + return NewError( + Error_InvalidStateTransition, + fmt.Sprintf("Cannot transition from %s to %s", from.String(), to.String()), + ) + } + + return nil +} + +func (t *TaskDefinition) Copy() *TaskDefinition { + if t == nil { + return &TaskDefinition{} + } + newTaskDef := *t + return &newTaskDef +} + +func (def *TaskDefinition) Validate() error { + var validationError ValidationError + + if def.RootFs == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } else { + rootFsURL, err := url.Parse(def.RootFs) + if err != nil || rootFsURL.Scheme == "" { + validationError = validationError.Append(ErrInvalidField{"rootfs"}) + } + } + + if def.Action == nil { + validationError = validationError.Append(ErrInvalidActionType) + } else if err := def.Action.Validate(); err != nil { + validationError = validationError.Append(ErrInvalidField{"action"}) + validationError = validationError.Append(err) + } + + if def.MemoryMb < 0 { + validationError = validationError.Append(ErrInvalidField{"memory_mb"}) + } + + if def.DiskMb < 0 { + validationError = validationError.Append(ErrInvalidField{"disk_mb"}) + } + + if 
limit := def.LogRateLimit; limit != nil { + if limit.BytesPerSecond < -1 { + validationError = validationError.Append(ErrInvalidField{"log_rate_limit"}) + } + } + + if def.MaxPids < 0 { + validationError = validationError.Append(ErrInvalidField{"max_pids"}) + } + + if len(def.Annotation) > maximumAnnotationLength { + validationError = validationError.Append(ErrInvalidField{"annotation"}) + } + + for _, rule := range def.EgressRules { + err := rule.Validate() + if err != nil { + validationError = validationError.Append(ErrInvalidField{"egress_rules"}) + } + } + + if def.ImageUsername == "" && def.ImagePassword != "" { + validationError = validationError.Append(ErrInvalidField{"image_username"}) + } + + if def.ImageUsername != "" && def.ImagePassword == "" { + validationError = validationError.Append(ErrInvalidField{"image_password"}) + } + + err := validateCachedDependencies(def.CachedDependencies) + if err != nil { + validationError = validationError.Append(err) + } + + err = validateImageLayers(def.ImageLayers, def.LegacyDownloadUser) + if err != nil { + validationError = validationError.Append(err) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func downgradeTaskDefinitionV3ToV2(t *TaskDefinition) *TaskDefinition { + layers := ImageLayers(t.ImageLayers) + + t.CachedDependencies = append(layers.ToCachedDependencies(), t.CachedDependencies...) + t.Action = layers.ToDownloadActions(t.LegacyDownloadUser, t.Action) + t.ImageLayers = nil + + return t +} + +func (t *Task) VersionDownTo(v format.Version) *Task { + t = t.Copy() + + if v < t.Version() { + t.TaskDefinition = downgradeTaskDefinitionV3ToV2(t.TaskDefinition) + } + + return t +} + +func (t *Task) Version() format.Version { + return format.V3 +} + +func (s *Task_State) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return err + } + + if v, found := Task_State_value[name]; found { + *s = Task_State(v) + return nil + } + return fmt.Errorf("invalid state: %s", name) +} + +func (s Task_State) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/task.pb.go b/vendor/code.cloudfoundry.org/bbs/models/task.pb.go new file mode 100644 index 00000000..d41ff768 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task.pb.go @@ -0,0 +1,3018 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: task.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Task_State int32 + +const ( + Task_Invalid Task_State = 0 + Task_Pending Task_State = 1 + Task_Running Task_State = 2 + Task_Completed Task_State = 3 + Task_Resolving Task_State = 4 +) + +var Task_State_name = map[int32]string{ + 0: "Invalid", + 1: "Pending", + 2: "Running", + 3: "Completed", + 4: "Resolving", +} + +var Task_State_value = map[string]int32{ + "Invalid": 0, + "Pending": 1, + "Running": 2, + "Completed": 3, + "Resolving": 4, +} + +func (Task_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ce5d8dd45b4a91ff, []int{1, 0} +} + +type TaskDefinition struct { + RootFs string `protobuf:"bytes,1,opt,name=root_fs,json=rootFs,proto3" json:"rootfs"` + EnvironmentVariables []*EnvironmentVariable `protobuf:"bytes,2,rep,name=environment_variables,json=environmentVariables,proto3" json:"env,omitempty"` + Action *Action `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` + DiskMb int32 `protobuf:"varint,4,opt,name=disk_mb,json=diskMb,proto3" json:"disk_mb"` + MemoryMb int32 `protobuf:"varint,5,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb"` + CpuWeight uint32 `protobuf:"varint,6,opt,name=cpu_weight,json=cpuWeight,proto3" json:"cpu_weight"` + Privileged bool `protobuf:"varint,7,opt,name=privileged,proto3" json:"privileged"` + LogSource string `protobuf:"bytes,8,opt,name=log_source,json=logSource,proto3" json:"log_source"` + LogGuid string `protobuf:"bytes,9,opt,name=log_guid,json=logGuid,proto3" json:"log_guid"` + MetricsGuid string `protobuf:"bytes,10,opt,name=metrics_guid,json=metricsGuid,proto3" json:"metrics_guid"` + ResultFile string `protobuf:"bytes,11,opt,name=result_file,json=resultFile,proto3" json:"result_file"` + CompletionCallbackUrl string `protobuf:"bytes,12,opt,name=completion_callback_url,json=completionCallbackUrl,proto3" json:"completion_callback_url,omitempty"` + Annotation string `protobuf:"bytes,13,opt,name=annotation,proto3" json:"annotation,omitempty"` + EgressRules []*SecurityGroupRule `protobuf:"bytes,14,rep,name=egress_rules,json=egressRules,proto3" json:"egress_rules,omitempty"` + CachedDependencies []*CachedDependency `protobuf:"bytes,15,rep,name=cached_dependencies,json=cachedDependencies,proto3" json:"cached_dependencies,omitempty"` + LegacyDownloadUser string `protobuf:"bytes,16,opt,name=legacy_download_user,json=legacyDownloadUser,proto3" json:"legacy_download_user,omitempty"` // Deprecated: Do not use. 
+ TrustedSystemCertificatesPath string `protobuf:"bytes,17,opt,name=trusted_system_certificates_path,json=trustedSystemCertificatesPath,proto3" json:"trusted_system_certificates_path,omitempty"` + VolumeMounts []*VolumeMount `protobuf:"bytes,18,rep,name=volume_mounts,json=volumeMounts,proto3" json:"volume_mounts,omitempty"` + Network *Network `protobuf:"bytes,19,opt,name=network,proto3" json:"network,omitempty"` + PlacementTags []string `protobuf:"bytes,20,rep,name=placement_tags,json=placementTags,proto3" json:"placement_tags,omitempty"` + MaxPids int32 `protobuf:"varint,21,opt,name=max_pids,json=maxPids,proto3" json:"max_pids"` + CertificateProperties *CertificateProperties `protobuf:"bytes,22,opt,name=certificate_properties,json=certificateProperties,proto3" json:"certificate_properties,omitempty"` + ImageUsername string `protobuf:"bytes,23,opt,name=image_username,json=imageUsername,proto3" json:"image_username"` + ImagePassword string `protobuf:"bytes,24,opt,name=image_password,json=imagePassword,proto3" json:"image_password"` + ImageLayers []*ImageLayer `protobuf:"bytes,25,rep,name=image_layers,json=imageLayers,proto3" json:"image_layers,omitempty"` + LogRateLimit *LogRateLimit `protobuf:"bytes,26,opt,name=log_rate_limit,json=logRateLimit,proto3" json:"log_rate_limit,omitempty"` + MetricTags map[string]*MetricTagValue `protobuf:"bytes,27,rep,name=metric_tags,json=metricTags,proto3" json:"metric_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TaskDefinition) Reset() { *m = TaskDefinition{} } +func (*TaskDefinition) ProtoMessage() {} +func (*TaskDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_ce5d8dd45b4a91ff, []int{0} +} +func (m *TaskDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskDefinition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskDefinition.Merge(m, src) +} +func (m *TaskDefinition) XXX_Size() int { + return m.Size() +} +func (m *TaskDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_TaskDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskDefinition proto.InternalMessageInfo + +func (m *TaskDefinition) GetRootFs() string { + if m != nil { + return m.RootFs + } + return "" +} + +func (m *TaskDefinition) GetEnvironmentVariables() []*EnvironmentVariable { + if m != nil { + return m.EnvironmentVariables + } + return nil +} + +func (m *TaskDefinition) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *TaskDefinition) GetDiskMb() int32 { + if m != nil { + return m.DiskMb + } + return 0 +} + +func (m *TaskDefinition) GetMemoryMb() int32 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +func (m *TaskDefinition) GetCpuWeight() uint32 { + if m != nil { + return m.CpuWeight + } + return 0 +} + +func (m *TaskDefinition) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *TaskDefinition) GetLogSource() string { + if m != nil { + return m.LogSource + } + return "" +} + +func (m *TaskDefinition) GetLogGuid() string { + if m != nil { + return m.LogGuid + } + return "" +} + +func (m *TaskDefinition) GetMetricsGuid() string { + if m != nil { + return 
m.MetricsGuid + } + return "" +} + +func (m *TaskDefinition) GetResultFile() string { + if m != nil { + return m.ResultFile + } + return "" +} + +func (m *TaskDefinition) GetCompletionCallbackUrl() string { + if m != nil { + return m.CompletionCallbackUrl + } + return "" +} + +func (m *TaskDefinition) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *TaskDefinition) GetEgressRules() []*SecurityGroupRule { + if m != nil { + return m.EgressRules + } + return nil +} + +func (m *TaskDefinition) GetCachedDependencies() []*CachedDependency { + if m != nil { + return m.CachedDependencies + } + return nil +} + +// Deprecated: Do not use. +func (m *TaskDefinition) GetLegacyDownloadUser() string { + if m != nil { + return m.LegacyDownloadUser + } + return "" +} + +func (m *TaskDefinition) GetTrustedSystemCertificatesPath() string { + if m != nil { + return m.TrustedSystemCertificatesPath + } + return "" +} + +func (m *TaskDefinition) GetVolumeMounts() []*VolumeMount { + if m != nil { + return m.VolumeMounts + } + return nil +} + +func (m *TaskDefinition) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *TaskDefinition) GetPlacementTags() []string { + if m != nil { + return m.PlacementTags + } + return nil +} + +func (m *TaskDefinition) GetMaxPids() int32 { + if m != nil { + return m.MaxPids + } + return 0 +} + +func (m *TaskDefinition) GetCertificateProperties() *CertificateProperties { + if m != nil { + return m.CertificateProperties + } + return nil +} + +func (m *TaskDefinition) GetImageUsername() string { + if m != nil { + return m.ImageUsername + } + return "" +} + +func (m *TaskDefinition) GetImagePassword() string { + if m != nil { + return m.ImagePassword + } + return "" +} + +func (m *TaskDefinition) GetImageLayers() []*ImageLayer { + if m != nil { + return m.ImageLayers + } + return nil +} + +func (m *TaskDefinition) GetLogRateLimit() *LogRateLimit { + if m != nil { + return m.LogRateLimit + } + return nil +} + +func (m *TaskDefinition) GetMetricTags() map[string]*MetricTagValue { + if m != nil { + return m.MetricTags + } + return nil +} + +type Task struct { + *TaskDefinition `protobuf:"bytes,1,opt,name=task_definition,json=taskDefinition,proto3,embedded=task_definition" json:""` + TaskGuid string `protobuf:"bytes,2,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at"` + UpdatedAt int64 `protobuf:"varint,5,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at"` + FirstCompletedAt int64 `protobuf:"varint,6,opt,name=first_completed_at,json=firstCompletedAt,proto3" json:"first_completed_at"` + State Task_State `protobuf:"varint,7,opt,name=state,proto3,enum=models.Task_State" json:"state"` + CellId string `protobuf:"bytes,8,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + Result string `protobuf:"bytes,9,opt,name=result,proto3" json:"result"` + Failed bool `protobuf:"varint,10,opt,name=failed,proto3" json:"failed"` + FailureReason string `protobuf:"bytes,11,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` + RejectionCount int32 `protobuf:"varint,12,opt,name=rejection_count,json=rejectionCount,proto3" json:"rejection_count"` + RejectionReason string `protobuf:"bytes,13,opt,name=rejection_reason,json=rejectionReason,proto3" json:"rejection_reason"` +} + +func (m *Task) Reset() { *m = Task{} } +func 
(*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { + return fileDescriptor_ce5d8dd45b4a91ff, []int{1} +} +func (m *Task) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Task) XXX_Merge(src proto.Message) { + xxx_messageInfo_Task.Merge(m, src) +} +func (m *Task) XXX_Size() int { + return m.Size() +} +func (m *Task) XXX_DiscardUnknown() { + xxx_messageInfo_Task.DiscardUnknown(m) +} + +var xxx_messageInfo_Task proto.InternalMessageInfo + +func (m *Task) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *Task) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *Task) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Task) GetUpdatedAt() int64 { + if m != nil { + return m.UpdatedAt + } + return 0 +} + +func (m *Task) GetFirstCompletedAt() int64 { + if m != nil { + return m.FirstCompletedAt + } + return 0 +} + +func (m *Task) GetState() Task_State { + if m != nil { + return m.State + } + return Task_Invalid +} + +func (m *Task) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *Task) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +func (m *Task) GetFailed() bool { + if m != nil { + return m.Failed + } + return false +} + +func (m *Task) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +func (m *Task) GetRejectionCount() int32 { + if m != nil { + return m.RejectionCount + } + return 0 +} + +func (m *Task) GetRejectionReason() string { + if m != nil { + return m.RejectionReason + } + return "" +} + +func init() { + proto.RegisterEnum("models.Task_State", Task_State_name, Task_State_value) + proto.RegisterType((*TaskDefinition)(nil), "models.TaskDefinition") + proto.RegisterMapType((map[string]*MetricTagValue)(nil), "models.TaskDefinition.MetricTagsEntry") + proto.RegisterType((*Task)(nil), "models.Task") +} + +func init() { proto.RegisterFile("task.proto", fileDescriptor_ce5d8dd45b4a91ff) } + +var fileDescriptor_ce5d8dd45b4a91ff = []byte{ + // 1351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x56, 0xcd, 0x6e, 0xdb, 0xc6, + 0x16, 0x36, 0xed, 0x48, 0xb6, 0x46, 0x3f, 0x96, 0xc7, 0x3f, 0x61, 0x9c, 0x1b, 0x51, 0xf0, 0xbd, + 0x37, 0x75, 0x83, 0xc4, 0x29, 0x92, 0xb4, 0x48, 0x83, 0x00, 0x85, 0x65, 0x27, 0x86, 0x01, 0xbb, + 0x30, 0xc6, 0x71, 0xba, 0x24, 0x46, 0xe4, 0x88, 0x9e, 0x9a, 0xe4, 0x10, 0x33, 0x43, 0x39, 0xda, + 0xf5, 0x11, 0xfa, 0x14, 0x45, 0x1f, 0xa5, 0x4b, 0x2f, 0xb3, 0x22, 0x1a, 0x67, 0x53, 0x68, 0x95, + 0x47, 0x28, 0x66, 0x38, 0xa4, 0x24, 0xc7, 0x2b, 0x9d, 0xf3, 0x7d, 0xdf, 0x99, 0x9f, 0x33, 0x3c, + 0xe7, 0x08, 0x00, 0x89, 0xc5, 0xc5, 0x4e, 0xc2, 0x99, 0x64, 0xb0, 0x1a, 0x31, 0x9f, 0x84, 0x62, + 0xf3, 0x49, 0x40, 0xe5, 0x79, 0xda, 0xdf, 0xf1, 0x58, 0xf4, 0x34, 0x60, 0x01, 0x7b, 0xaa, 0xe9, + 0x7e, 0x3a, 0xd0, 0x9e, 0x76, 0xb4, 0x95, 0x87, 0x6d, 0x36, 0xb1, 0x27, 0x29, 0x8b, 0x85, 0x71, + 0xef, 0x93, 0x78, 0x48, 0x39, 0x8b, 0x23, 0x12, 0x4b, 0x77, 0x88, 0x39, 0xc5, 0xfd, 0x90, 0x14, + 0xe4, 0x9a, 0x20, 0x5e, 0xca, 0xa9, 0x1c, 0xb9, 0x01, 0x67, 0x69, 0x62, 0xd0, 0xbb, 0x1e, 0xf6, + 0xce, 0x89, 
0xef, 0xfa, 0x24, 0x21, 0xb1, 0x4f, 0x62, 0x6f, 0x64, 0x08, 0x38, 0x64, 0x61, 0x1a, + 0x11, 0x37, 0x62, 0x69, 0x2c, 0x8b, 0xed, 0x62, 0x22, 0x2f, 0x19, 0x37, 0x87, 0xde, 0xfc, 0x8f, + 0x47, 0xb8, 0xa4, 0x03, 0xea, 0x61, 0x49, 0xdc, 0x84, 0xb3, 0x44, 0xb9, 0xe5, 0x7e, 0x2b, 0x34, + 0xc2, 0x01, 0x71, 0x43, 0x3c, 0x22, 0xbc, 0x38, 0x42, 0xc8, 0x02, 0x97, 0x2b, 0x75, 0x48, 0x23, + 0x5a, 0xac, 0xba, 0x12, 0x11, 0xc9, 0xa9, 0xe7, 0x4a, 0x1c, 0x98, 0xd8, 0xad, 0x3f, 0x1a, 0xa0, + 0xf5, 0x0e, 0x8b, 0x8b, 0x7d, 0x32, 0xa0, 0x31, 0x55, 0x57, 0x84, 0xff, 0x05, 0x8b, 0x9c, 0x31, + 0xe9, 0x0e, 0x84, 0x6d, 0x75, 0xad, 0xed, 0x5a, 0x0f, 0x8c, 0x33, 0xa7, 0xaa, 0xa0, 0x81, 0x40, + 0xfa, 0xf7, 0xad, 0x80, 0x1e, 0x58, 0xbf, 0x35, 0x05, 0xf6, 0x7c, 0x77, 0x61, 0xbb, 0xfe, 0xec, + 0xfe, 0x4e, 0x9e, 0xe6, 0x9d, 0x37, 0x13, 0xd1, 0x7b, 0xa3, 0xe9, 0xad, 0x8c, 0x33, 0xa7, 0x49, + 0xe2, 0xe1, 0x63, 0x16, 0x51, 0x49, 0xa2, 0x44, 0x8e, 0xd0, 0x1a, 0xf9, 0x5a, 0x27, 0xe0, 0x43, + 0x50, 0xcd, 0xd3, 0x6e, 0x2f, 0x74, 0xad, 0xed, 0xfa, 0xb3, 0x56, 0xb1, 0xea, 0xae, 0x46, 0x91, + 0x61, 0xe1, 0xff, 0xc0, 0xa2, 0x4f, 0xc5, 0x85, 0x1b, 0xf5, 0xed, 0x3b, 0x5d, 0x6b, 0xbb, 0xd2, + 0xab, 0x8f, 0x33, 0xa7, 0x80, 0x50, 0x55, 0x19, 0xc7, 0x7d, 0xf8, 0x08, 0xd4, 0x22, 0x12, 0x31, + 0x3e, 0x52, 0xba, 0x8a, 0xd6, 0x35, 0xc7, 0x99, 0x33, 0x01, 0xd1, 0x52, 0x6e, 0x1e, 0xf7, 0xe1, + 0x13, 0x00, 0xbc, 0x24, 0x75, 0x2f, 0x09, 0x0d, 0xce, 0xa5, 0x5d, 0xed, 0x5a, 0xdb, 0xcd, 0x5e, + 0x6b, 0x9c, 0x39, 0x53, 0x28, 0xaa, 0x79, 0x49, 0xfa, 0x8b, 0x36, 0xe1, 0x0e, 0x00, 0x09, 0xa7, + 0x43, 0x1a, 0x92, 0x80, 0xf8, 0xf6, 0x62, 0xd7, 0xda, 0x5e, 0xca, 0xe5, 0x13, 0x14, 0x4d, 0xd9, + 0x6a, 0x79, 0xf5, 0x40, 0x82, 0xa5, 0xdc, 0x23, 0xf6, 0x92, 0xce, 0xb2, 0xd6, 0x4f, 0x50, 0x54, + 0x0b, 0x59, 0x70, 0xaa, 0x4d, 0xf8, 0x0d, 0x58, 0x52, 0x44, 0x90, 0x52, 0xdf, 0xae, 0x69, 0x71, + 0x63, 0x9c, 0x39, 0x25, 0x86, 0x16, 0x43, 0x16, 0x1c, 0xa4, 0xd4, 0x87, 0xcf, 0x41, 0x23, 0x7f, + 0x62, 0x91, 0x8b, 0x81, 0x16, 0xb7, 0xc7, 0x99, 0x33, 0x83, 0xa3, 0xba, 0xf1, 0x74, 0xd0, 0x77, + 0xa0, 0xce, 0x89, 0x48, 0x43, 0xe9, 0x0e, 0x68, 0x48, 0xec, 0xba, 0x8e, 0x59, 0x1e, 0x67, 0xce, + 0x34, 0x8c, 0x40, 0xee, 0xbc, 0xa5, 0x21, 0x81, 0x3f, 0x80, 0xbb, 0x1e, 0x8b, 0x92, 0x90, 0xa8, + 0xec, 0xbb, 0x1e, 0x0e, 0xc3, 0x3e, 0xf6, 0x2e, 0xdc, 0x94, 0x87, 0x76, 0x43, 0x45, 0xa3, 0xf5, + 0x09, 0xbd, 0x67, 0xd8, 0x33, 0x1e, 0xc2, 0x0e, 0x00, 0x38, 0x8e, 0x99, 0xc4, 0xfa, 0x4d, 0x9b, + 0x5a, 0x3a, 0x85, 0xc0, 0xd7, 0xa0, 0x41, 0x02, 0x4e, 0x84, 0x70, 0x79, 0xaa, 0xbe, 0xa5, 0x96, + 0xfe, 0x96, 0xee, 0x15, 0xaf, 0x7e, 0x6a, 0xca, 0xea, 0x40, 0x55, 0x15, 0x4a, 0x43, 0x82, 0xea, + 0xb9, 0x5c, 0xd9, 0x02, 0x1e, 0x82, 0xd5, 0x9b, 0x25, 0x46, 0x89, 0xb0, 0x97, 0xf5, 0x22, 0x76, + 0xb1, 0xc8, 0x9e, 0x96, 0xec, 0x97, 0x45, 0x88, 0xa0, 0x37, 0x8b, 0x50, 0x22, 0xe0, 0x0b, 0xb0, + 0x16, 0x92, 0x00, 0x7b, 0x23, 0xd7, 0x67, 0x97, 0x71, 0xc8, 0xb0, 0xef, 0xa6, 0x82, 0x70, 0xbb, + 0xad, 0x73, 0x33, 0x6f, 0x5b, 0x08, 0xe6, 0xfc, 0xbe, 0xa1, 0xcf, 0x04, 0xe1, 0xf0, 0x00, 0x74, + 0x25, 0x4f, 0x85, 0x24, 0xbe, 0x2b, 0x46, 0x42, 0x92, 0xc8, 0x9d, 0x2a, 0x5b, 0xe1, 0x26, 0x58, + 0x9e, 0xdb, 0x2b, 0xfa, 0xd2, 0x0f, 0x8c, 0xee, 0x54, 0xcb, 0xf6, 0xa6, 0x54, 0x27, 0x58, 0x9e, + 0xc3, 0x97, 0xa0, 0x39, 0xdd, 0x13, 0x84, 0x0d, 0xf5, 0x1d, 0x56, 0x8b, 0x3b, 0xbc, 0xd7, 0xe4, + 0xb1, 0xe2, 0x50, 0x63, 0x38, 0x71, 0x04, 0xfc, 0x16, 0x2c, 0x9a, 0xce, 0x61, 0xaf, 0xea, 0x92, + 0x59, 0x2e, 0x62, 0x7e, 0xce, 0x61, 0x54, 0xf0, 0xf0, 0xff, 0xa0, 0x95, 0x84, 0xd8, 0x23, 0xba, + 0x7e, 0x55, 0x47, 0xb0, 0xd7, 0xba, 
0x0b, 0xdb, 0x35, 0xd4, 0x2c, 0xd1, 0x77, 0x38, 0x10, 0xea, + 0xdb, 0x8b, 0xf0, 0x07, 0x37, 0xa1, 0xbe, 0xb0, 0xd7, 0x75, 0xd1, 0xe8, 0x6f, 0xaf, 0xc0, 0xd0, + 0x62, 0x84, 0x3f, 0x9c, 0x50, 0x5f, 0xc0, 0x77, 0x60, 0xe3, 0xf6, 0x2e, 0x65, 0x6f, 0xe8, 0x93, + 0x3c, 0x28, 0x5f, 0x60, 0xa2, 0x3a, 0x29, 0x45, 0x68, 0xdd, 0xbb, 0x0d, 0x86, 0x3f, 0x82, 0x56, + 0xde, 0xdd, 0x54, 0xfe, 0x63, 0x1c, 0x11, 0xfb, 0xae, 0x7e, 0x03, 0x38, 0xce, 0x9c, 0x1b, 0x0c, + 0x6a, 0x6a, 0xff, 0xcc, 0xb8, 0x93, 0xd0, 0x04, 0x0b, 0x71, 0xc9, 0xb8, 0x6f, 0xdb, 0x37, 0x43, + 0x0b, 0xc6, 0x84, 0x9e, 0x18, 0x17, 0x7e, 0x0f, 0x1a, 0x53, 0x3d, 0x55, 0xd8, 0xf7, 0x74, 0xfe, + 0x61, 0x71, 0x83, 0x43, 0xc5, 0x1d, 0x29, 0x0a, 0xd5, 0x69, 0x69, 0x0b, 0xf8, 0x0a, 0xb4, 0x66, + 0xfb, 0xae, 0xbd, 0xa9, 0xaf, 0xbe, 0x56, 0x04, 0x1e, 0xb1, 0x00, 0x61, 0x49, 0x8e, 0x14, 0x87, + 0x1a, 0xe1, 0x94, 0x07, 0x0f, 0x40, 0x7d, 0xaa, 0x3b, 0xdb, 0xf7, 0xf5, 0x8e, 0x0f, 0x8b, 0xc0, + 0xd9, 0x16, 0xbd, 0x73, 0xac, 0x95, 0xea, 0x7d, 0xde, 0xc4, 0x92, 0x8f, 0x10, 0x88, 0x4a, 0x60, + 0xf3, 0x0c, 0x2c, 0xdf, 0xa0, 0x61, 0x1b, 0x2c, 0x5c, 0x90, 0x51, 0xde, 0xcd, 0x91, 0x32, 0xe1, + 0x63, 0x50, 0x19, 0xe2, 0x30, 0x25, 0xf6, 0xbc, 0x3e, 0xe0, 0x46, 0xb1, 0x4f, 0x19, 0xf9, 0x5e, + 0xb1, 0x28, 0x17, 0xbd, 0x9a, 0x7f, 0x69, 0x6d, 0x7d, 0xa9, 0x80, 0x3b, 0xea, 0x14, 0xf0, 0x10, + 0x2c, 0xab, 0x71, 0xea, 0xfa, 0xe5, 0x71, 0xf4, 0xc2, 0x53, 0x8b, 0xcc, 0x1e, 0xb6, 0xb7, 0x74, + 0x95, 0x39, 0xd6, 0x38, 0x73, 0xe6, 0x50, 0x4b, 0xce, 0x4e, 0x9a, 0x47, 0xa0, 0xa6, 0x97, 0xd2, + 0xbd, 0x6a, 0x5e, 0x3f, 0x8e, 0xee, 0xc8, 0x25, 0x88, 0x96, 0x94, 0xa9, 0xbb, 0xd4, 0x16, 0xa8, + 0xfa, 0x2c, 0xc2, 0x34, 0x9f, 0x05, 0x66, 0x28, 0xe5, 0x08, 0x32, 0xbf, 0xba, 0x6b, 0x73, 0x82, + 0x55, 0x01, 0x62, 0xa9, 0x47, 0xc1, 0x82, 0xe9, 0xda, 0x25, 0x8a, 0x6a, 0xc6, 0xde, 0x95, 0x4a, + 0x9e, 0x26, 0x7e, 0x21, 0xaf, 0x4c, 0xe4, 0x13, 0x14, 0xd5, 0x8c, 0xbd, 0x2b, 0xe1, 0x3e, 0x80, + 0x03, 0xca, 0x85, 0x74, 0x4d, 0x73, 0xcb, 0xc3, 0xaa, 0x3a, 0x6c, 0x63, 0x9c, 0x39, 0xb7, 0xb0, + 0xa8, 0xad, 0xb1, 0xbd, 0x02, 0xda, 0x95, 0xf0, 0x39, 0xa8, 0x08, 0x89, 0x25, 0xd1, 0x53, 0xa2, + 0x35, 0xf9, 0xa6, 0x54, 0xd2, 0x76, 0x4e, 0x15, 0xd3, 0xab, 0x8d, 0x33, 0x27, 0x17, 0xa1, 0xfc, + 0x47, 0x0d, 0x38, 0x8f, 0x84, 0xa1, 0x4b, 0x7d, 0x33, 0x2c, 0xf4, 0x80, 0x33, 0x10, 0xaa, 0x2a, + 0xe3, 0x50, 0xa7, 0x28, 0x6f, 0xd2, 0x66, 0x48, 0xe4, 0x73, 0x5b, 0x23, 0xc8, 0xfc, 0x2a, 0xcd, + 0x00, 0xd3, 0x90, 0xe4, 0xb3, 0x61, 0x29, 0xd7, 0xe4, 0x08, 0x32, 0xbf, 0xaa, 0x70, 0x94, 0x95, + 0x72, 0xe2, 0x72, 0x82, 0x05, 0x8b, 0xcd, 0x4c, 0xd0, 0x85, 0x33, 0xcb, 0xa0, 0xa6, 0xf1, 0x91, + 0x76, 0xe1, 0x6b, 0xb0, 0xcc, 0xc9, 0xaf, 0xc4, 0xcb, 0x07, 0x83, 0xea, 0x49, 0x7a, 0x22, 0x54, + 0x7a, 0xab, 0xe3, 0xcc, 0xb9, 0x49, 0xa1, 0x56, 0x09, 0xec, 0x29, 0x1f, 0xfe, 0x04, 0xda, 0x13, + 0x89, 0xd9, 0x5a, 0x4f, 0x89, 0xde, 0xda, 0x38, 0x73, 0xbe, 0xe2, 0xd0, 0x64, 0xc1, 0x7c, 0xfb, + 0xad, 0x23, 0x50, 0xd1, 0x29, 0x84, 0x75, 0xb0, 0x78, 0x18, 0x0f, 0x71, 0x48, 0xfd, 0xf6, 0x9c, + 0x72, 0x4e, 0x48, 0xec, 0xd3, 0x38, 0x68, 0x5b, 0xca, 0x41, 0x69, 0x1c, 0x2b, 0x67, 0x1e, 0x36, + 0x41, 0xad, 0x7c, 0x9b, 0xf6, 0x82, 0x72, 0x11, 0x11, 0x2c, 0x1c, 0x2a, 0xf6, 0x4e, 0xef, 0xc5, + 0xd5, 0xa7, 0x8e, 0xf5, 0xf1, 0x53, 0x67, 0xee, 0xcb, 0xa7, 0x8e, 0xf5, 0xdb, 0x75, 0xc7, 0xfa, + 0xf3, 0xba, 0x63, 0xfd, 0x75, 0xdd, 0xb1, 0xae, 0xae, 0x3b, 0xd6, 0xdf, 0xd7, 0x1d, 0xeb, 0x9f, + 0xeb, 0xce, 0xdc, 0x97, 0xeb, 0x8e, 0xf5, 0xfb, 0xe7, 0xce, 0xdc, 0xd5, 0xe7, 0xce, 0xdc, 0xc7, + 0xcf, 0x9d, 0xb9, 0x7e, 0x55, 0xff, 0xb1, 0x7a, 0xfe, 0x6f, 
0x00, 0x00, 0x00, 0xff, 0xff, 0xe5, + 0x48, 0xdf, 0x94, 0x75, 0x0a, 0x00, 0x00, +} + +func (x Task_State) String() string { + s, ok := Task_State_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *TaskDefinition) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskDefinition) + if !ok { + that2, ok := that.(TaskDefinition) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.RootFs != that1.RootFs { + return false + } + if len(this.EnvironmentVariables) != len(that1.EnvironmentVariables) { + return false + } + for i := range this.EnvironmentVariables { + if !this.EnvironmentVariables[i].Equal(that1.EnvironmentVariables[i]) { + return false + } + } + if !this.Action.Equal(that1.Action) { + return false + } + if this.DiskMb != that1.DiskMb { + return false + } + if this.MemoryMb != that1.MemoryMb { + return false + } + if this.CpuWeight != that1.CpuWeight { + return false + } + if this.Privileged != that1.Privileged { + return false + } + if this.LogSource != that1.LogSource { + return false + } + if this.LogGuid != that1.LogGuid { + return false + } + if this.MetricsGuid != that1.MetricsGuid { + return false + } + if this.ResultFile != that1.ResultFile { + return false + } + if this.CompletionCallbackUrl != that1.CompletionCallbackUrl { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if len(this.EgressRules) != len(that1.EgressRules) { + return false + } + for i := range this.EgressRules { + if !this.EgressRules[i].Equal(that1.EgressRules[i]) { + return false + } + } + if len(this.CachedDependencies) != len(that1.CachedDependencies) { + return false + } + for i := range this.CachedDependencies { + if !this.CachedDependencies[i].Equal(that1.CachedDependencies[i]) { + return false + } + } + if this.LegacyDownloadUser != that1.LegacyDownloadUser { + return false + } + if this.TrustedSystemCertificatesPath != that1.TrustedSystemCertificatesPath { + return false + } + if len(this.VolumeMounts) != len(that1.VolumeMounts) { + return false + } + for i := range this.VolumeMounts { + if !this.VolumeMounts[i].Equal(that1.VolumeMounts[i]) { + return false + } + } + if !this.Network.Equal(that1.Network) { + return false + } + if len(this.PlacementTags) != len(that1.PlacementTags) { + return false + } + for i := range this.PlacementTags { + if this.PlacementTags[i] != that1.PlacementTags[i] { + return false + } + } + if this.MaxPids != that1.MaxPids { + return false + } + if !this.CertificateProperties.Equal(that1.CertificateProperties) { + return false + } + if this.ImageUsername != that1.ImageUsername { + return false + } + if this.ImagePassword != that1.ImagePassword { + return false + } + if len(this.ImageLayers) != len(that1.ImageLayers) { + return false + } + for i := range this.ImageLayers { + if !this.ImageLayers[i].Equal(that1.ImageLayers[i]) { + return false + } + } + if !this.LogRateLimit.Equal(that1.LogRateLimit) { + return false + } + if len(this.MetricTags) != len(that1.MetricTags) { + return false + } + for i := range this.MetricTags { + if !this.MetricTags[i].Equal(that1.MetricTags[i]) { + return false + } + } + return true +} +func (this *Task) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Task) + if !ok { + that2, ok := that.(Task) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + 
return this == nil + } else if this == nil { + return false + } + if !this.TaskDefinition.Equal(that1.TaskDefinition) { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CreatedAt != that1.CreatedAt { + return false + } + if this.UpdatedAt != that1.UpdatedAt { + return false + } + if this.FirstCompletedAt != that1.FirstCompletedAt { + return false + } + if this.State != that1.State { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.Result != that1.Result { + return false + } + if this.Failed != that1.Failed { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + if this.RejectionCount != that1.RejectionCount { + return false + } + if this.RejectionReason != that1.RejectionReason { + return false + } + return true +} +func (this *TaskDefinition) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 31) + s = append(s, "&models.TaskDefinition{") + s = append(s, "RootFs: "+fmt.Sprintf("%#v", this.RootFs)+",\n") + if this.EnvironmentVariables != nil { + s = append(s, "EnvironmentVariables: "+fmt.Sprintf("%#v", this.EnvironmentVariables)+",\n") + } + if this.Action != nil { + s = append(s, "Action: "+fmt.Sprintf("%#v", this.Action)+",\n") + } + s = append(s, "DiskMb: "+fmt.Sprintf("%#v", this.DiskMb)+",\n") + s = append(s, "MemoryMb: "+fmt.Sprintf("%#v", this.MemoryMb)+",\n") + s = append(s, "CpuWeight: "+fmt.Sprintf("%#v", this.CpuWeight)+",\n") + s = append(s, "Privileged: "+fmt.Sprintf("%#v", this.Privileged)+",\n") + s = append(s, "LogSource: "+fmt.Sprintf("%#v", this.LogSource)+",\n") + s = append(s, "LogGuid: "+fmt.Sprintf("%#v", this.LogGuid)+",\n") + s = append(s, "MetricsGuid: "+fmt.Sprintf("%#v", this.MetricsGuid)+",\n") + s = append(s, "ResultFile: "+fmt.Sprintf("%#v", this.ResultFile)+",\n") + s = append(s, "CompletionCallbackUrl: "+fmt.Sprintf("%#v", this.CompletionCallbackUrl)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + if this.EgressRules != nil { + s = append(s, "EgressRules: "+fmt.Sprintf("%#v", this.EgressRules)+",\n") + } + if this.CachedDependencies != nil { + s = append(s, "CachedDependencies: "+fmt.Sprintf("%#v", this.CachedDependencies)+",\n") + } + s = append(s, "LegacyDownloadUser: "+fmt.Sprintf("%#v", this.LegacyDownloadUser)+",\n") + s = append(s, "TrustedSystemCertificatesPath: "+fmt.Sprintf("%#v", this.TrustedSystemCertificatesPath)+",\n") + if this.VolumeMounts != nil { + s = append(s, "VolumeMounts: "+fmt.Sprintf("%#v", this.VolumeMounts)+",\n") + } + if this.Network != nil { + s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n") + } + s = append(s, "PlacementTags: "+fmt.Sprintf("%#v", this.PlacementTags)+",\n") + s = append(s, "MaxPids: "+fmt.Sprintf("%#v", this.MaxPids)+",\n") + if this.CertificateProperties != nil { + s = append(s, "CertificateProperties: "+fmt.Sprintf("%#v", this.CertificateProperties)+",\n") + } + s = append(s, "ImageUsername: "+fmt.Sprintf("%#v", this.ImageUsername)+",\n") + s = append(s, "ImagePassword: "+fmt.Sprintf("%#v", this.ImagePassword)+",\n") + if this.ImageLayers != nil { + s = append(s, "ImageLayers: "+fmt.Sprintf("%#v", this.ImageLayers)+",\n") + } + if this.LogRateLimit != nil { + s = append(s, "LogRateLimit: "+fmt.Sprintf("%#v", this.LogRateLimit)+",\n") + } + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = 
append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%#v: %#v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + if this.MetricTags != nil { + s = append(s, "MetricTags: "+mapStringForMetricTags+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Task) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 17) + s = append(s, "&models.Task{") + if this.TaskDefinition != nil { + s = append(s, "TaskDefinition: "+fmt.Sprintf("%#v", this.TaskDefinition)+",\n") + } + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n") + s = append(s, "UpdatedAt: "+fmt.Sprintf("%#v", this.UpdatedAt)+",\n") + s = append(s, "FirstCompletedAt: "+fmt.Sprintf("%#v", this.FirstCompletedAt)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s = append(s, "Failed: "+fmt.Sprintf("%#v", this.Failed)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "RejectionCount: "+fmt.Sprintf("%#v", this.RejectionCount)+",\n") + s = append(s, "RejectionReason: "+fmt.Sprintf("%#v", this.RejectionReason)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TaskDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MetricTags) > 0 { + for k := range m.MetricTags { + v := m.MetricTags[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTask(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTask(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + } + if m.LogRateLimit != nil { + { + size, err := m.LogRateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.ImageLayers) > 0 { + for iNdEx := len(m.ImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.ImagePassword) > 0 { + i -= len(m.ImagePassword) + copy(dAtA[i:], m.ImagePassword) + i = encodeVarintTask(dAtA, i, 
uint64(len(m.ImagePassword))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.ImageUsername) > 0 { + i -= len(m.ImageUsername) + copy(dAtA[i:], m.ImageUsername) + i = encodeVarintTask(dAtA, i, uint64(len(m.ImageUsername))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.CertificateProperties != nil { + { + size, err := m.CertificateProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.MaxPids != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.MaxPids)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.PlacementTags) > 0 { + for iNdEx := len(m.PlacementTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PlacementTags[iNdEx]) + copy(dAtA[i:], m.PlacementTags[iNdEx]) + i = encodeVarintTask(dAtA, i, uint64(len(m.PlacementTags[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if len(m.TrustedSystemCertificatesPath) > 0 { + i -= len(m.TrustedSystemCertificatesPath) + copy(dAtA[i:], m.TrustedSystemCertificatesPath) + i = encodeVarintTask(dAtA, i, uint64(len(m.TrustedSystemCertificatesPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.LegacyDownloadUser) > 0 { + i -= len(m.LegacyDownloadUser) + copy(dAtA[i:], m.LegacyDownloadUser) + i = encodeVarintTask(dAtA, i, uint64(len(m.LegacyDownloadUser))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.CachedDependencies) > 0 { + for iNdEx := len(m.CachedDependencies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CachedDependencies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + if len(m.EgressRules) > 0 { + for iNdEx := len(m.EgressRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EgressRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintTask(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 0x6a + } + if len(m.CompletionCallbackUrl) > 0 { + i -= len(m.CompletionCallbackUrl) + copy(dAtA[i:], m.CompletionCallbackUrl) + i = encodeVarintTask(dAtA, i, uint64(len(m.CompletionCallbackUrl))) + i-- + dAtA[i] = 0x62 + } + if len(m.ResultFile) > 0 { + i -= len(m.ResultFile) + copy(dAtA[i:], m.ResultFile) + i = encodeVarintTask(dAtA, i, uint64(len(m.ResultFile))) + i-- + dAtA[i] = 0x5a + } + if len(m.MetricsGuid) > 0 { + i -= len(m.MetricsGuid) + copy(dAtA[i:], m.MetricsGuid) + i = encodeVarintTask(dAtA, i, uint64(len(m.MetricsGuid))) + i-- + dAtA[i] = 0x52 + } + if len(m.LogGuid) > 0 { + i -= len(m.LogGuid) + copy(dAtA[i:], m.LogGuid) + i = encodeVarintTask(dAtA, i, uint64(len(m.LogGuid))) + i-- + dAtA[i] = 0x4a + } + if len(m.LogSource) > 0 { + i -= len(m.LogSource) 
+ copy(dAtA[i:], m.LogSource) + i = encodeVarintTask(dAtA, i, uint64(len(m.LogSource))) + i-- + dAtA[i] = 0x42 + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.CpuWeight != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.CpuWeight)) + i-- + dAtA[i] = 0x30 + } + if m.MemoryMb != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.MemoryMb)) + i-- + dAtA[i] = 0x28 + } + if m.DiskMb != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.DiskMb)) + i-- + dAtA[i] = 0x20 + } + if m.Action != nil { + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.EnvironmentVariables) > 0 { + for iNdEx := len(m.EnvironmentVariables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvironmentVariables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RootFs) > 0 { + i -= len(m.RootFs) + copy(dAtA[i:], m.RootFs) + i = encodeVarintTask(dAtA, i, uint64(len(m.RootFs))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Task) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RejectionReason) > 0 { + i -= len(m.RejectionReason) + copy(dAtA[i:], m.RejectionReason) + i = encodeVarintTask(dAtA, i, uint64(len(m.RejectionReason))) + i-- + dAtA[i] = 0x6a + } + if m.RejectionCount != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.RejectionCount)) + i-- + dAtA[i] = 0x60 + } + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTask(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x5a + } + if m.Failed { + i-- + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if len(m.Result) > 0 { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintTask(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x4a + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTask(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x42 + } + if m.State != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x38 + } + if m.FirstCompletedAt != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.FirstCompletedAt)) + i-- + dAtA[i] = 0x30 + } + if m.UpdatedAt != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.UpdatedAt)) + i-- + dAtA[i] = 0x28 + } + if m.CreatedAt != 0 { + i = encodeVarintTask(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTask(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x1a + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTask(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0x12 + } + if m.TaskDefinition != nil { + { + size, err := m.TaskDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTask(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTask(dAtA []byte, offset int, v uint64) int { + offset -= sovTask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TaskDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RootFs) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if len(m.EnvironmentVariables) > 0 { + for _, e := range m.EnvironmentVariables { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + if m.Action != nil { + l = m.Action.Size() + n += 1 + l + sovTask(uint64(l)) + } + if m.DiskMb != 0 { + n += 1 + sovTask(uint64(m.DiskMb)) + } + if m.MemoryMb != 0 { + n += 1 + sovTask(uint64(m.MemoryMb)) + } + if m.CpuWeight != 0 { + n += 1 + sovTask(uint64(m.CpuWeight)) + } + if m.Privileged { + n += 2 + } + l = len(m.LogSource) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.LogGuid) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.MetricsGuid) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.ResultFile) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.CompletionCallbackUrl) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Annotation) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if len(m.EgressRules) > 0 { + for _, e := range m.EgressRules { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + if len(m.CachedDependencies) > 0 { + for _, e := range m.CachedDependencies { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + l = len(m.LegacyDownloadUser) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + l = len(m.TrustedSystemCertificatesPath) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + sovTask(uint64(l)) + } + } + if m.Network != nil { + l = m.Network.Size() + n += 2 + l + sovTask(uint64(l)) + } + if len(m.PlacementTags) > 0 { + for _, s := range m.PlacementTags { + l = len(s) + n += 2 + l + sovTask(uint64(l)) + } + } + if m.MaxPids != 0 { + n += 2 + sovTask(uint64(m.MaxPids)) + } + if m.CertificateProperties != nil { + l = m.CertificateProperties.Size() + n += 2 + l + sovTask(uint64(l)) + } + l = len(m.ImageUsername) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + l = len(m.ImagePassword) + if l > 0 { + n += 2 + l + sovTask(uint64(l)) + } + if len(m.ImageLayers) > 0 { + for _, e := range m.ImageLayers { + l = e.Size() + n += 2 + l + sovTask(uint64(l)) + } + } + if m.LogRateLimit != nil { + l = m.LogRateLimit.Size() + n += 2 + l + sovTask(uint64(l)) + } + if len(m.MetricTags) > 0 { + for k, v := range m.MetricTags { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovTask(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovTask(uint64(len(k))) + l + n += mapEntrySize + 2 + sovTask(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Task) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskDefinition != nil { + l = m.TaskDefinition.Size() + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovTask(uint64(m.CreatedAt)) + } + if m.UpdatedAt != 0 { + n += 1 + sovTask(uint64(m.UpdatedAt)) + } + if m.FirstCompletedAt != 0 { + n += 1 + sovTask(uint64(m.FirstCompletedAt)) + } + if m.State != 0 { + n += 1 + 
sovTask(uint64(m.State)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Result) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Failed { + n += 2 + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.RejectionCount != 0 { + n += 1 + sovTask(uint64(m.RejectionCount)) + } + l = len(m.RejectionReason) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func sovTask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTask(x uint64) (n int) { + return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TaskDefinition) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironmentVariables := "[]*EnvironmentVariable{" + for _, f := range this.EnvironmentVariables { + repeatedStringForEnvironmentVariables += strings.Replace(fmt.Sprintf("%v", f), "EnvironmentVariable", "EnvironmentVariable", 1) + "," + } + repeatedStringForEnvironmentVariables += "}" + repeatedStringForEgressRules := "[]*SecurityGroupRule{" + for _, f := range this.EgressRules { + repeatedStringForEgressRules += strings.Replace(fmt.Sprintf("%v", f), "SecurityGroupRule", "SecurityGroupRule", 1) + "," + } + repeatedStringForEgressRules += "}" + repeatedStringForCachedDependencies := "[]*CachedDependency{" + for _, f := range this.CachedDependencies { + repeatedStringForCachedDependencies += strings.Replace(fmt.Sprintf("%v", f), "CachedDependency", "CachedDependency", 1) + "," + } + repeatedStringForCachedDependencies += "}" + repeatedStringForVolumeMounts := "[]*VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(fmt.Sprintf("%v", f), "VolumeMount", "VolumeMount", 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForImageLayers := "[]*ImageLayer{" + for _, f := range this.ImageLayers { + repeatedStringForImageLayers += strings.Replace(fmt.Sprintf("%v", f), "ImageLayer", "ImageLayer", 1) + "," + } + repeatedStringForImageLayers += "}" + keysForMetricTags := make([]string, 0, len(this.MetricTags)) + for k, _ := range this.MetricTags { + keysForMetricTags = append(keysForMetricTags, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetricTags) + mapStringForMetricTags := "map[string]*MetricTagValue{" + for _, k := range keysForMetricTags { + mapStringForMetricTags += fmt.Sprintf("%v: %v,", k, this.MetricTags[k]) + } + mapStringForMetricTags += "}" + s := strings.Join([]string{`&TaskDefinition{`, + `RootFs:` + fmt.Sprintf("%v", this.RootFs) + `,`, + `EnvironmentVariables:` + repeatedStringForEnvironmentVariables + `,`, + `Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "Action", "Action", 1) + `,`, + `DiskMb:` + fmt.Sprintf("%v", this.DiskMb) + `,`, + `MemoryMb:` + fmt.Sprintf("%v", this.MemoryMb) + `,`, + `CpuWeight:` + fmt.Sprintf("%v", this.CpuWeight) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `LogSource:` + fmt.Sprintf("%v", this.LogSource) + `,`, + `LogGuid:` + fmt.Sprintf("%v", this.LogGuid) + `,`, + `MetricsGuid:` + fmt.Sprintf("%v", this.MetricsGuid) + `,`, + `ResultFile:` + fmt.Sprintf("%v", this.ResultFile) + `,`, + `CompletionCallbackUrl:` + fmt.Sprintf("%v", this.CompletionCallbackUrl) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `EgressRules:` + repeatedStringForEgressRules + `,`, + `CachedDependencies:` + repeatedStringForCachedDependencies + `,`, + `LegacyDownloadUser:` + fmt.Sprintf("%v", this.LegacyDownloadUser) + `,`, + 
`TrustedSystemCertificatesPath:` + fmt.Sprintf("%v", this.TrustedSystemCertificatesPath) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `PlacementTags:` + fmt.Sprintf("%v", this.PlacementTags) + `,`, + `MaxPids:` + fmt.Sprintf("%v", this.MaxPids) + `,`, + `CertificateProperties:` + strings.Replace(fmt.Sprintf("%v", this.CertificateProperties), "CertificateProperties", "CertificateProperties", 1) + `,`, + `ImageUsername:` + fmt.Sprintf("%v", this.ImageUsername) + `,`, + `ImagePassword:` + fmt.Sprintf("%v", this.ImagePassword) + `,`, + `ImageLayers:` + repeatedStringForImageLayers + `,`, + `LogRateLimit:` + strings.Replace(fmt.Sprintf("%v", this.LogRateLimit), "LogRateLimit", "LogRateLimit", 1) + `,`, + `MetricTags:` + mapStringForMetricTags + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Task{`, + `TaskDefinition:` + strings.Replace(this.TaskDefinition.String(), "TaskDefinition", "TaskDefinition", 1) + `,`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `UpdatedAt:` + fmt.Sprintf("%v", this.UpdatedAt) + `,`, + `FirstCompletedAt:` + fmt.Sprintf("%v", this.FirstCompletedAt) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `RejectionCount:` + fmt.Sprintf("%v", this.RejectionCount) + `,`, + `RejectionReason:` + fmt.Sprintf("%v", this.RejectionReason) + `,`, + `}`, + }, "") + return s +} +func valueToStringTask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TaskDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootFs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootFs = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvironmentVariables", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvironmentVariables = append(m.EnvironmentVariables, &EnvironmentVariable{}) + if err := m.EnvironmentVariables[len(m.EnvironmentVariables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Action == nil { + m.Action = &Action{} + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskMb", wireType) + } + m.DiskMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DiskMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryMb", wireType) + } + m.MemoryMb = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryMb |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuWeight", wireType) + } + m.CpuWeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuWeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricsGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionCallbackUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompletionCallbackUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressRules", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressRules = append(m.EgressRules, &SecurityGroupRule{}) + if err := m.EgressRules[len(m.EgressRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachedDependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CachedDependencies = append(m.CachedDependencies, &CachedDependency{}) + if err := m.CachedDependencies[len(m.CachedDependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LegacyDownloadUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LegacyDownloadUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedSystemCertificatesPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustedSystemCertificatesPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, &VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlacementTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PlacementTags = append(m.PlacementTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPids", wireType) + } + m.MaxPids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPids |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertificateProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertificateProperties == nil { + m.CertificateProperties = &CertificateProperties{} + } + if err := m.CertificateProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePassword", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePassword = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLayers = append(m.ImageLayers, &ImageLayer{}) + if err := m.ImageLayers[len(m.ImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogRateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogRateLimit == nil { + m.LogRateLimit = &LogRateLimit{} + } + if err := m.LogRateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricTags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricTags == nil { + m.MetricTags = make(map[string]*MetricTagValue) + } + var mapkey string + var mapvalue *MetricTagValue + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTask + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTask + } + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthTask + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthTask + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MetricTagValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MetricTags[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TaskDefinition == nil { + m.TaskDefinition = &TaskDefinition{} + } + if err := m.TaskDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + m.UpdatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstCompletedAt", wireType) + } + m.FirstCompletedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FirstCompletedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Task_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectionCount", wireType) + } + m.RejectionCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectionCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectionReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectionReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTask + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTask + } + depth-- + case 5: + iNdEx += 4 + 
default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTask + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/task.proto b/vendor/code.cloudfoundry.org/bbs/models/task.proto new file mode 100644 index 00000000..2e4b1c27 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "actions.proto"; +import "environment_variables.proto"; +import "security_group.proto"; +import "cached_dependency.proto"; +import "volume_mount.proto"; +import "network.proto"; +import "certificate_properties.proto"; +import "image_layer.proto"; +import "log_rate_limit.proto"; +import "metric_tags.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message TaskDefinition { + string root_fs = 1 [(gogoproto.jsontag) = "rootfs"]; + repeated EnvironmentVariable environment_variables = 2 [(gogoproto.jsontag) = "env,omitempty"]; + Action action = 3; + int32 disk_mb = 4 [(gogoproto.jsontag) = "disk_mb"]; + int32 memory_mb = 5 [(gogoproto.jsontag) = "memory_mb"]; + uint32 cpu_weight = 6 [(gogoproto.jsontag) = "cpu_weight"]; + bool privileged = 7 [(gogoproto.jsontag) = "privileged"]; + string log_source = 8 [(gogoproto.jsontag) = "log_source"]; + string log_guid = 9 [(gogoproto.jsontag) = "log_guid"]; + string metrics_guid = 10 [(gogoproto.jsontag) = "metrics_guid"]; + string result_file = 11 [(gogoproto.jsontag) = "result_file"]; + string completion_callback_url = 12; + string annotation = 13; + repeated SecurityGroupRule egress_rules = 14; + repeated CachedDependency cached_dependencies = 15; + string legacy_download_user = 16 [deprecated=true]; + string trusted_system_certificates_path = 17; + repeated VolumeMount volume_mounts = 18; + Network network = 19; + repeated string placement_tags = 20; + int32 max_pids = 21 [(gogoproto.jsontag) = "max_pids"]; + CertificateProperties certificate_properties = 22; + string image_username = 23 [(gogoproto.jsontag) = "image_username"]; + string image_password = 24 [(gogoproto.jsontag) = "image_password"]; + repeated ImageLayer image_layers = 25; + LogRateLimit log_rate_limit = 26; + map<string, MetricTagValue> metric_tags = 27; +} + +message Task { + enum State { + Invalid = 0; + Pending = 1; + Running = 2; + Completed = 3; + Resolving = 4; + } + + TaskDefinition task_definition = 1 [(gogoproto.jsontag) = "", (gogoproto.embed) = true]; + + string task_guid = 2 [(gogoproto.jsontag) = "task_guid"]; + string domain = 3 [(gogoproto.jsontag) = "domain"]; + int64 created_at = 4 [(gogoproto.jsontag) = "created_at"]; + int64 updated_at = 5 [(gogoproto.jsontag) = "updated_at"]; + int64 first_completed_at = 6 [(gogoproto.jsontag) = "first_completed_at"]; + + State state = 7 [(gogoproto.jsontag) = "state"]; + + string cell_id = 8 [(gogoproto.jsontag) = "cell_id"]; + + string result = 9 [(gogoproto.jsontag) = "result"]; + bool failed = 10 [(gogoproto.jsontag) = "failed"]; + string failure_reason = 11 [(gogoproto.jsontag) = "failure_reason"]; + int32 rejection_count = 12 [(gogoproto.jsontag) = "rejection_count"]; + string rejection_reason = 13 [(gogoproto.jsontag) = "rejection_reason"]; +}
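The task.proto above defines the BBS Task message and its State enum: an embedded TaskDefinition plus lifecycle fields (task_guid, domain, state, cell_id, failure_reason, ...), with State moving through Pending, Running, Completed and Resolving. As a minimal sketch (not part of the vendored files) of how the generated models package could be consumed, assuming only names visible in the generated code above (models.Task, models.Task_State, and prefixed constants such as models.Task_Running) plus a hypothetical helper countTasksByState, tasks returned by a BBS client could be tallied per state:

package main

import (
	"fmt"

	"code.cloudfoundry.org/bbs/models"
)

// countTasksByState tallies tasks per lifecycle state. It relies only on the
// generated Task struct's State field and the Task_State enum shown above.
func countTasksByState(tasks []*models.Task) map[models.Task_State]int {
	counts := make(map[models.Task_State]int)
	for _, t := range tasks {
		counts[t.State]++
	}
	return counts
}

func main() {
	// Hypothetical tasks; in practice these would come from a BBS client call.
	tasks := []*models.Task{
		{TaskGuid: "task-1", Domain: "cf-tasks", State: models.Task_Running},
		{TaskGuid: "task-2", Domain: "cf-tasks", State: models.Task_Completed, Failed: true, FailureReason: "exit status 1"},
	}
	fmt.Println(countTasksByState(tasks))
}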
+ diff --git a/vendor/code.cloudfoundry.org/bbs/models/task_requests.go b/vendor/code.cloudfoundry.org/bbs/models/task_requests.go new file mode 100644 index 00000000..a3fa65aa --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task_requests.go @@ -0,0 +1,125 @@ +package models + +func (req *DesireTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if req.Domain == "" { + validationError = validationError.Append(ErrInvalidField{"domain"}) + } + + if req.TaskDefinition == nil { + validationError = validationError.Append(ErrInvalidField{"task_definition"}) + } else if defErr := req.TaskDefinition.Validate(); defErr != nil { + validationError = validationError.Append(defErr) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *StartTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *CompleteTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.CellId == "" { + validationError = validationError.Append(ErrInvalidField{"cell_id"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *FailTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.FailureReason == "" { + validationError = validationError.Append(ErrInvalidField{"failure_reason"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *RejectTaskRequest) Validate() error { + var validationError ValidationError + + if !taskGuidPattern.MatchString(req.TaskGuid) { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + if req.RejectionReason == "" { + validationError = validationError.Append(ErrInvalidField{"failure_reason"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (req *TasksRequest) Validate() error { + return nil +} + +func (request *TaskByGuidRequest) Validate() error { + var validationError ValidationError + + if request.TaskGuid == "" { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} + +func (request *TaskGuidRequest) Validate() error { + var validationError ValidationError + + if request.TaskGuid == "" { + validationError = validationError.Append(ErrInvalidField{"task_guid"}) + } + + if !validationError.Empty() { + return validationError + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go b/vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go new file mode 100644 index 00000000..0e1b642c --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task_requests.pb.go @@ -0,0 +1,4016 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: task_requests.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type TaskLifecycleResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *TaskLifecycleResponse) Reset() { *m = TaskLifecycleResponse{} } +func (*TaskLifecycleResponse) ProtoMessage() {} +func (*TaskLifecycleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{0} +} +func (m *TaskLifecycleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskLifecycleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskLifecycleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskLifecycleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskLifecycleResponse.Merge(m, src) +} +func (m *TaskLifecycleResponse) XXX_Size() int { + return m.Size() +} +func (m *TaskLifecycleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskLifecycleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskLifecycleResponse proto.InternalMessageInfo + +func (m *TaskLifecycleResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type DesireTaskRequest struct { + TaskDefinition *TaskDefinition `protobuf:"bytes,1,opt,name=task_definition,json=taskDefinition,proto3" json:"task_definition"` + TaskGuid string `protobuf:"bytes,2,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain"` +} + +func (m *DesireTaskRequest) Reset() { *m = DesireTaskRequest{} } +func (*DesireTaskRequest) ProtoMessage() {} +func (*DesireTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{1} +} +func (m *DesireTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DesireTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DesireTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DesireTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DesireTaskRequest.Merge(m, src) +} +func (m *DesireTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *DesireTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DesireTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DesireTaskRequest proto.InternalMessageInfo + +func (m *DesireTaskRequest) GetTaskDefinition() *TaskDefinition { + if m != nil { + return m.TaskDefinition + } + return nil +} + +func (m *DesireTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m 
*DesireTaskRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +type StartTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} } +func (*StartTaskRequest) ProtoMessage() {} +func (*StartTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{2} +} +func (m *StartTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskRequest.Merge(m, src) +} +func (m *StartTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *StartTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskRequest proto.InternalMessageInfo + +func (m *StartTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *StartTaskRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type StartTaskResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + ShouldStart bool `protobuf:"varint,2,opt,name=should_start,json=shouldStart,proto3" json:"should_start"` +} + +func (m *StartTaskResponse) Reset() { *m = StartTaskResponse{} } +func (*StartTaskResponse) ProtoMessage() {} +func (*StartTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{3} +} +func (m *StartTaskResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartTaskResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskResponse.Merge(m, src) +} +func (m *StartTaskResponse) XXX_Size() int { + return m.Size() +} +func (m *StartTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskResponse proto.InternalMessageInfo + +func (m *StartTaskResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *StartTaskResponse) GetShouldStart() bool { + if m != nil { + return m.ShouldStart + } + return false +} + +// Deprecated: Do not use. 
+type FailTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + FailureReason string `protobuf:"bytes,2,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` +} + +func (m *FailTaskRequest) Reset() { *m = FailTaskRequest{} } +func (*FailTaskRequest) ProtoMessage() {} +func (*FailTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{4} +} +func (m *FailTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FailTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FailTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FailTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FailTaskRequest.Merge(m, src) +} +func (m *FailTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *FailTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FailTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FailTaskRequest proto.InternalMessageInfo + +func (m *FailTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *FailTaskRequest) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +type RejectTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + RejectionReason string `protobuf:"bytes,2,opt,name=rejection_reason,json=rejectionReason,proto3" json:"rejection_reason"` +} + +func (m *RejectTaskRequest) Reset() { *m = RejectTaskRequest{} } +func (*RejectTaskRequest) ProtoMessage() {} +func (*RejectTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{5} +} +func (m *RejectTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RejectTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RejectTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RejectTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RejectTaskRequest.Merge(m, src) +} +func (m *RejectTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *RejectTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RejectTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RejectTaskRequest proto.InternalMessageInfo + +func (m *RejectTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *RejectTaskRequest) GetRejectionReason() string { + if m != nil { + return m.RejectionReason + } + return "" +} + +type TaskGuidRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` +} + +func (m *TaskGuidRequest) Reset() { *m = TaskGuidRequest{} } +func (*TaskGuidRequest) ProtoMessage() {} +func (*TaskGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{6} +} +func (m *TaskGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskGuidRequest.Merge(m, src) +} +func (m *TaskGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *TaskGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskGuidRequest proto.InternalMessageInfo + +func (m *TaskGuidRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +type CompleteTaskRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` + Failed bool `protobuf:"varint,3,opt,name=failed,proto3" json:"failed"` + FailureReason string `protobuf:"bytes,4,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` + Result string `protobuf:"bytes,5,opt,name=result,proto3" json:"result"` +} + +func (m *CompleteTaskRequest) Reset() { *m = CompleteTaskRequest{} } +func (*CompleteTaskRequest) ProtoMessage() {} +func (*CompleteTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{7} +} +func (m *CompleteTaskRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompleteTaskRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CompleteTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteTaskRequest.Merge(m, src) +} +func (m *CompleteTaskRequest) XXX_Size() int { + return m.Size() +} +func (m *CompleteTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteTaskRequest proto.InternalMessageInfo + +func (m *CompleteTaskRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *CompleteTaskRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +func (m *CompleteTaskRequest) GetFailed() bool { + if m != nil { + return m.Failed + } + return false +} + +func (m *CompleteTaskRequest) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +func (m *CompleteTaskRequest) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +type TaskCallbackResponse struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` + Failed bool `protobuf:"varint,2,opt,name=failed,proto3" json:"failed"` + FailureReason string `protobuf:"bytes,3,opt,name=failure_reason,json=failureReason,proto3" json:"failure_reason"` + Result string `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` + Annotation string `protobuf:"bytes,5,opt,name=annotation,proto3" json:"annotation,omitempty"` + CreatedAt int64 `protobuf:"varint,6,opt,name=created_at,json=createdAt,proto3" json:"created_at"` +} + +func (m *TaskCallbackResponse) Reset() { *m = TaskCallbackResponse{} } +func (*TaskCallbackResponse) ProtoMessage() {} +func (*TaskCallbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{8} +} +func (m *TaskCallbackResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskCallbackResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskCallbackResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskCallbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskCallbackResponse.Merge(m, src) +} +func (m *TaskCallbackResponse) XXX_Size() int { + return m.Size() +} +func (m *TaskCallbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskCallbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskCallbackResponse proto.InternalMessageInfo + +func (m *TaskCallbackResponse) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +func (m *TaskCallbackResponse) GetFailed() bool { + if m != nil { + return m.Failed + } + return false +} + +func (m *TaskCallbackResponse) GetFailureReason() string { + if m != nil { + return m.FailureReason + } + return "" +} + +func (m *TaskCallbackResponse) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +func (m *TaskCallbackResponse) GetAnnotation() string { + if m != nil { + return m.Annotation + } + return "" +} + +func (m *TaskCallbackResponse) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +type TasksRequest struct { + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain"` + CellId string `protobuf:"bytes,2,opt,name=cell_id,json=cellId,proto3" json:"cell_id"` +} + +func (m *TasksRequest) Reset() { *m = TasksRequest{} } +func (*TasksRequest) ProtoMessage() {} +func (*TasksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{9} +} +func (m *TasksRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TasksRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TasksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TasksRequest.Merge(m, src) +} +func (m *TasksRequest) XXX_Size() int { + return m.Size() +} +func (m *TasksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TasksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TasksRequest proto.InternalMessageInfo + +func (m *TasksRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *TasksRequest) GetCellId() string { + if m != nil { + return m.CellId + } + return "" +} + +type TasksResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Tasks []*Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty"` +} + +func (m *TasksResponse) Reset() { *m = TasksResponse{} } +func (*TasksResponse) ProtoMessage() {} +func (*TasksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{10} +} +func (m *TasksResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TasksResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TasksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TasksResponse.Merge(m, 
src) +} +func (m *TasksResponse) XXX_Size() int { + return m.Size() +} +func (m *TasksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TasksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TasksResponse proto.InternalMessageInfo + +func (m *TasksResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *TasksResponse) GetTasks() []*Task { + if m != nil { + return m.Tasks + } + return nil +} + +type TaskByGuidRequest struct { + TaskGuid string `protobuf:"bytes,1,opt,name=task_guid,json=taskGuid,proto3" json:"task_guid"` +} + +func (m *TaskByGuidRequest) Reset() { *m = TaskByGuidRequest{} } +func (*TaskByGuidRequest) ProtoMessage() {} +func (*TaskByGuidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{11} +} +func (m *TaskByGuidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskByGuidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskByGuidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskByGuidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskByGuidRequest.Merge(m, src) +} +func (m *TaskByGuidRequest) XXX_Size() int { + return m.Size() +} +func (m *TaskByGuidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskByGuidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskByGuidRequest proto.InternalMessageInfo + +func (m *TaskByGuidRequest) GetTaskGuid() string { + if m != nil { + return m.TaskGuid + } + return "" +} + +type TaskResponse struct { + Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Task *Task `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"` +} + +func (m *TaskResponse) Reset() { *m = TaskResponse{} } +func (*TaskResponse) ProtoMessage() {} +func (*TaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_13f778b8a0251259, []int{12} +} +func (m *TaskResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskResponse.Merge(m, src) +} +func (m *TaskResponse) XXX_Size() int { + return m.Size() +} +func (m *TaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskResponse proto.InternalMessageInfo + +func (m *TaskResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *TaskResponse) GetTask() *Task { + if m != nil { + return m.Task + } + return nil +} + +func init() { + proto.RegisterType((*TaskLifecycleResponse)(nil), "models.TaskLifecycleResponse") + proto.RegisterType((*DesireTaskRequest)(nil), "models.DesireTaskRequest") + proto.RegisterType((*StartTaskRequest)(nil), "models.StartTaskRequest") + proto.RegisterType((*StartTaskResponse)(nil), "models.StartTaskResponse") + proto.RegisterType((*FailTaskRequest)(nil), "models.FailTaskRequest") + proto.RegisterType((*RejectTaskRequest)(nil), "models.RejectTaskRequest") + proto.RegisterType((*TaskGuidRequest)(nil), "models.TaskGuidRequest") + 
proto.RegisterType((*CompleteTaskRequest)(nil), "models.CompleteTaskRequest") + proto.RegisterType((*TaskCallbackResponse)(nil), "models.TaskCallbackResponse") + proto.RegisterType((*TasksRequest)(nil), "models.TasksRequest") + proto.RegisterType((*TasksResponse)(nil), "models.TasksResponse") + proto.RegisterType((*TaskByGuidRequest)(nil), "models.TaskByGuidRequest") + proto.RegisterType((*TaskResponse)(nil), "models.TaskResponse") +} + +func init() { proto.RegisterFile("task_requests.proto", fileDescriptor_13f778b8a0251259) } + +var fileDescriptor_13f778b8a0251259 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xce, 0x26, 0x6d, 0x68, 0x27, 0x4d, 0xd3, 0xb8, 0x05, 0x59, 0x3d, 0xac, 0x23, 0xc3, 0x21, + 0x42, 0x6a, 0x2a, 0xb5, 0x5c, 0x40, 0xa0, 0x8a, 0xb4, 0x80, 0x90, 0x38, 0x2d, 0x45, 0xea, 0x2d, + 0xda, 0xd8, 0x9b, 0xd4, 0xd4, 0xf1, 0x16, 0xef, 0xfa, 0x50, 0x89, 0x43, 0x1f, 0x81, 0x03, 0x0f, + 0xc1, 0x2b, 0xf0, 0x06, 0x1c, 0x7b, 0xec, 0xc9, 0xa2, 0xee, 0x05, 0xf9, 0xd4, 0x47, 0x40, 0xbb, + 0x76, 0x9b, 0x1f, 0x28, 0x6a, 0x22, 0x71, 0xda, 0x9d, 0x6f, 0xc6, 0xdf, 0xcc, 0x37, 0x3b, 0x99, + 0xc0, 0xaa, 0xa4, 0xe2, 0xa8, 0x13, 0xb2, 0x4f, 0x11, 0x13, 0x52, 0xb4, 0x8e, 0x43, 0x2e, 0xb9, + 0x51, 0x1e, 0x70, 0x97, 0xf9, 0x62, 0x7d, 0xa3, 0xef, 0xc9, 0xc3, 0xa8, 0xdb, 0x72, 0xf8, 0x60, + 0xb3, 0xcf, 0xfb, 0x7c, 0x53, 0xbb, 0xbb, 0x51, 0x4f, 0x5b, 0xda, 0xd0, 0xb7, 0xec, 0xb3, 0x75, + 0x50, 0x5c, 0xf9, 0xbd, 0xc2, 0xc2, 0x90, 0x87, 0x99, 0x61, 0x3f, 0x87, 0xfb, 0xfb, 0x54, 0x1c, + 0xbd, 0xf3, 0x7a, 0xcc, 0x39, 0x71, 0x7c, 0x46, 0x98, 0x38, 0xe6, 0x81, 0x60, 0xc6, 0x43, 0x98, + 0xd7, 0x71, 0x26, 0x6a, 0xa0, 0x66, 0x65, 0xab, 0xda, 0xca, 0x12, 0xb7, 0x5e, 0x29, 0x90, 0x64, + 0x3e, 0xfb, 0x3b, 0x82, 0xfa, 0x1e, 0x13, 0x5e, 0xc8, 0x14, 0x09, 0xc9, 0x4a, 0x35, 0xf6, 0xa1, + 0xa6, 0x4b, 0x77, 0x59, 0xcf, 0x0b, 0x3c, 0xe9, 0xf1, 0x20, 0x27, 0x79, 0x70, 0x4d, 0xa2, 0xa2, + 0xf7, 0x6e, 0xbc, 0xed, 0xd5, 0x34, 0xb6, 0x26, 0x3f, 0x21, 0xcb, 0x72, 0x2c, 0xc8, 0x78, 0x0c, + 0x8b, 0x3a, 0xa4, 0x1f, 0x79, 0xae, 0x59, 0x6c, 0xa0, 0xe6, 0x62, 0xbb, 0x9a, 0xc6, 0xd6, 0x10, + 0x24, 0x0b, 0xea, 0xfa, 0x26, 0xf2, 0x5c, 0xc3, 0x86, 0xb2, 0xcb, 0x07, 0xd4, 0x0b, 0xcc, 0x92, + 0x0e, 0x84, 0x34, 0xb6, 0x72, 0x84, 0xe4, 0xa7, 0xed, 0xc2, 0xca, 0x7b, 0x49, 0x43, 0x39, 0x5a, + 0xf9, 0x58, 0x0e, 0xf4, 0xef, 0x1c, 0x8f, 0xe0, 0x9e, 0xc3, 0x7c, 0xbf, 0x73, 0x53, 0x4d, 0x25, + 0x8d, 0xad, 0x6b, 0x88, 0x94, 0xd5, 0xe5, 0xad, 0x6b, 0x0f, 0xa0, 0x3e, 0x92, 0x65, 0x8a, 0xde, + 0x1a, 0xdb, 0xb0, 0x24, 0x0e, 0x79, 0xe4, 0xbb, 0x1d, 0xa1, 0x08, 0x74, 0x92, 0x85, 0xf6, 0x4a, + 0x1a, 0x5b, 0x63, 0x38, 0xa9, 0x64, 0x96, 0xce, 0x62, 0x7f, 0x86, 0xda, 0x6b, 0xea, 0xf9, 0xb3, + 0x6a, 0x7a, 0x0a, 0xcb, 0x3d, 0xea, 0xf9, 0x51, 0xc8, 0x3a, 0x21, 0xa3, 0x82, 0x07, 0xb9, 0x34, + 0x23, 0x8d, 0xad, 0x09, 0x0f, 0xa9, 0xe6, 0x36, 0xd1, 0xe6, 0xb3, 0xa2, 0x89, 0xec, 0x53, 0x04, + 0x75, 0xc2, 0x3e, 0x32, 0x67, 0xe6, 0xa6, 0xee, 0xc0, 0x4a, 0xa8, 0x09, 0x3c, 0x1e, 0x8c, 0x97, + 0xb0, 0x96, 0xc6, 0xd6, 0x1f, 0x3e, 0x52, 0xbb, 0x41, 0xb2, 0x32, 0xec, 0x17, 0x50, 0xdb, 0xcf, + 0xc9, 0x66, 0xc8, 0x6f, 0xa7, 0x08, 0x56, 0x77, 0xf9, 0xe0, 0xd8, 0x67, 0x92, 0xfd, 0xd7, 0xc1, + 0x50, 0x23, 0xaa, 0x1a, 0xc8, 0x5c, 0x3d, 0xa2, 0x0b, 0xd9, 0x88, 0x66, 0x08, 0xc9, 0xcf, 0xbf, + 0x3c, 0xc7, 0xdc, 0x1d, 0x9f, 0x43, 0xd1, 0x87, 0x4c, 0x44, 0xbe, 0x34, 0xe7, 0x87, 0xbf, 0x80, + 0x0c, 0x21, 0xf9, 0x69, 0x7f, 0x2d, 0xc2, 0x9a, 0x12, 0xb9, 0x4b, 0x7d, 0xbf, 
0x4b, 0x9d, 0xe1, + 0x7c, 0x4e, 0xa3, 0x76, 0xa8, 0xa3, 0x38, 0x85, 0x8e, 0xd2, 0xf4, 0x3a, 0xe6, 0x6e, 0xd3, 0x61, + 0x60, 0x00, 0x1a, 0x04, 0x5c, 0x52, 0xbd, 0x6a, 0xb4, 0x5e, 0x32, 0x82, 0x18, 0x1b, 0x00, 0x4e, + 0xc8, 0xa8, 0x64, 0x6e, 0x87, 0x4a, 0xb3, 0xdc, 0x40, 0xcd, 0x52, 0x7b, 0x39, 0x8d, 0xad, 0x11, + 0x94, 0x2c, 0xe6, 0xf7, 0x97, 0xd2, 0x3e, 0x80, 0x25, 0xd5, 0x15, 0x71, 0xfd, 0xf6, 0xc3, 0x65, + 0x82, 0x6e, 0x5b, 0x26, 0x77, 0x5c, 0x06, 0x07, 0x50, 0xcd, 0x99, 0xa7, 0x59, 0x04, 0x36, 0xcc, + 0xab, 0x6e, 0x0b, 0xb3, 0xd8, 0x28, 0x35, 0x2b, 0x5b, 0x4b, 0xa3, 0x4b, 0x94, 0x64, 0x2e, 0x7b, + 0x07, 0xea, 0xca, 0x6c, 0x9f, 0xcc, 0x3a, 0xf8, 0x1f, 0x32, 0xd1, 0xd3, 0x55, 0xd6, 0x80, 0x39, + 0x45, 0xa0, 0x25, 0x4f, 0x16, 0xa6, 0x3d, 0xed, 0x27, 0x67, 0x17, 0xb8, 0x70, 0x7e, 0x81, 0x0b, + 0x57, 0x17, 0x18, 0x9d, 0x26, 0x18, 0x7d, 0x4b, 0x30, 0xfa, 0x91, 0x60, 0x74, 0x96, 0x60, 0xf4, + 0x33, 0xc1, 0xe8, 0x57, 0x82, 0x0b, 0x57, 0x09, 0x46, 0x5f, 0x2e, 0x71, 0xe1, 0xec, 0x12, 0x17, + 0xce, 0x2f, 0x71, 0xa1, 0x5b, 0xd6, 0xff, 0x4d, 0xdb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x42, + 0xbf, 0x2a, 0x4c, 0x02, 0x07, 0x00, 0x00, +} + +func (this *TaskLifecycleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskLifecycleResponse) + if !ok { + that2, ok := that.(TaskLifecycleResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *DesireTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DesireTaskRequest) + if !ok { + that2, ok := that.(DesireTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskDefinition.Equal(that1.TaskDefinition) { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.Domain != that1.Domain { + return false + } + return true +} +func (this *StartTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartTaskRequest) + if !ok { + that2, ok := that.(StartTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *StartTaskResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StartTaskResponse) + if !ok { + that2, ok := that.(StartTaskResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if this.ShouldStart != that1.ShouldStart { + return false + } + return true +} +func (this *FailTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FailTaskRequest) + if !ok { + that2, ok := that.(FailTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.FailureReason != that1.FailureReason { + return false 
+ } + return true +} +func (this *RejectTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RejectTaskRequest) + if !ok { + that2, ok := that.(RejectTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.RejectionReason != that1.RejectionReason { + return false + } + return true +} +func (this *TaskGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskGuidRequest) + if !ok { + that2, ok := that.(TaskGuidRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + return true +} +func (this *CompleteTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CompleteTaskRequest) + if !ok { + that2, ok := that.(CompleteTaskRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.CellId != that1.CellId { + return false + } + if this.Failed != that1.Failed { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + if this.Result != that1.Result { + return false + } + return true +} +func (this *TaskCallbackResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskCallbackResponse) + if !ok { + that2, ok := that.(TaskCallbackResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + if this.Failed != that1.Failed { + return false + } + if this.FailureReason != that1.FailureReason { + return false + } + if this.Result != that1.Result { + return false + } + if this.Annotation != that1.Annotation { + return false + } + if this.CreatedAt != that1.CreatedAt { + return false + } + return true +} +func (this *TasksRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TasksRequest) + if !ok { + that2, ok := that.(TasksRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Domain != that1.Domain { + return false + } + if this.CellId != that1.CellId { + return false + } + return true +} +func (this *TasksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TasksResponse) + if !ok { + that2, ok := that.(TasksResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if len(this.Tasks) != len(that1.Tasks) { + return false + } + for i := range this.Tasks { + if !this.Tasks[i].Equal(that1.Tasks[i]) { + return false + } + } + return true +} +func (this *TaskByGuidRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskByGuidRequest) + if !ok { + that2, ok := that.(TaskByGuidRequest) + if 
ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TaskGuid != that1.TaskGuid { + return false + } + return true +} +func (this *TaskResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskResponse) + if !ok { + that2, ok := that.(TaskResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + return true +} +func (this *TaskLifecycleResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskLifecycleResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DesireTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&models.DesireTaskRequest{") + if this.TaskDefinition != nil { + s = append(s, "TaskDefinition: "+fmt.Sprintf("%#v", this.TaskDefinition)+",\n") + } + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.StartTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StartTaskResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.StartTaskResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "ShouldStart: "+fmt.Sprintf("%#v", this.ShouldStart)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FailTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.FailTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RejectTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.RejectTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "RejectionReason: "+fmt.Sprintf("%#v", this.RejectionReason)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskGuidRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CompleteTaskRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&models.CompleteTaskRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "Failed: 
"+fmt.Sprintf("%#v", this.Failed)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskCallbackResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&models.TaskCallbackResponse{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "Failed: "+fmt.Sprintf("%#v", this.Failed)+",\n") + s = append(s, "FailureReason: "+fmt.Sprintf("%#v", this.FailureReason)+",\n") + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + s = append(s, "CreatedAt: "+fmt.Sprintf("%#v", this.CreatedAt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TasksRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TasksRequest{") + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + s = append(s, "CellId: "+fmt.Sprintf("%#v", this.CellId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TasksResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TasksResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.Tasks != nil { + s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskByGuidRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.TaskByGuidRequest{") + s = append(s, "TaskGuid: "+fmt.Sprintf("%#v", this.TaskGuid)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.TaskResponse{") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTaskRequests(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TaskLifecycleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskLifecycleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskLifecycleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DesireTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*DesireTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DesireTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x1a + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0x12 + } + if m.TaskDefinition != nil { + { + size, err := m.TaskDefinition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartTaskResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ShouldStart { + i-- + if m.ShouldStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FailTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FailTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FailTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RejectTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RejectTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RejectTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RejectionReason) > 0 { + i -= len(m.RejectionReason) + copy(dAtA[i:], m.RejectionReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.RejectionReason))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompleteTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompleteTaskRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompleteTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Result) > 0 { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x2a + } + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x22 + } + if m.Failed { + i-- + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskCallbackResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCallbackResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskCallbackResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintTaskRequests(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x30 + } + if len(m.Annotation) > 0 { + i -= len(m.Annotation) + copy(dAtA[i:], m.Annotation) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Annotation))) + i-- + dAtA[i] = 
0x2a + } + if len(m.Result) > 0 { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x22 + } + if len(m.FailureReason) > 0 { + i -= len(m.FailureReason) + copy(dAtA[i:], m.FailureReason) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.FailureReason))) + i-- + dAtA[i] = 0x1a + } + if m.Failed { + i-- + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TasksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CellId) > 0 { + i -= len(m.CellId) + copy(dAtA[i:], m.CellId) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.CellId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TasksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskByGuidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskByGuidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskByGuidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TaskGuid) > 0 { + i -= len(m.TaskGuid) + copy(dAtA[i:], m.TaskGuid) + i = encodeVarintTaskRequests(dAtA, i, uint64(len(m.TaskGuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} 
+ +func (m *TaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTaskRequests(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTaskRequests(dAtA []byte, offset int, v uint64) int { + offset -= sovTaskRequests(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TaskLifecycleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *DesireTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskDefinition != nil { + l = m.TaskDefinition.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *StartTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *StartTaskResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.ShouldStart { + n += 2 + } + return n +} + +func (m *FailTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *RejectTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.RejectionReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TaskGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *CompleteTaskRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.Failed { + n += 2 + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Result) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TaskCallbackResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.Failed { + n += 2 + } + l = len(m.FailureReason) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Result) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.Annotation) + if l > 0 { + n 
+= 1 + l + sovTaskRequests(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovTaskRequests(uint64(m.CreatedAt)) + } + return n +} + +func (m *TasksRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + l = len(m.CellId) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TasksResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + } + return n +} + +func (m *TaskByGuidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskGuid) + if l > 0 { + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func (m *TaskResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovTaskRequests(uint64(l)) + } + return n +} + +func sovTaskRequests(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTaskRequests(x uint64) (n int) { + return sovTaskRequests(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TaskLifecycleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskLifecycleResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DesireTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DesireTaskRequest{`, + `TaskDefinition:` + strings.Replace(fmt.Sprintf("%v", this.TaskDefinition), "TaskDefinition", "TaskDefinition", 1) + `,`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `}`, + }, "") + return s +} +func (this *StartTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *StartTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartTaskResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `ShouldStart:` + fmt.Sprintf("%v", this.ShouldStart) + `,`, + `}`, + }, "") + return s +} +func (this *FailTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FailTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `}`, + }, "") + return s +} +func (this *RejectTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RejectTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `RejectionReason:` + fmt.Sprintf("%v", this.RejectionReason) + `,`, + `}`, + }, "") + return s +} +func (this *TaskGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskGuidRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `}`, + }, "") + return s +} +func (this *CompleteTaskRequest) String() string { + if this == nil { + return "nil" + } 
+ s := strings.Join([]string{`&CompleteTaskRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *TaskCallbackResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCallbackResponse{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `FailureReason:` + fmt.Sprintf("%v", this.FailureReason) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `Annotation:` + fmt.Sprintf("%v", this.Annotation) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `}`, + }, "") + return s +} +func (this *TasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksRequest{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `CellId:` + fmt.Sprintf("%v", this.CellId) + `,`, + `}`, + }, "") + return s +} +func (this *TasksResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTasks := "[]*Task{" + for _, f := range this.Tasks { + repeatedStringForTasks += strings.Replace(fmt.Sprintf("%v", f), "Task", "Task", 1) + "," + } + repeatedStringForTasks += "}" + s := strings.Join([]string{`&TasksResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Tasks:` + repeatedStringForTasks + `,`, + `}`, + }, "") + return s +} +func (this *TaskByGuidRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskByGuidRequest{`, + `TaskGuid:` + fmt.Sprintf("%v", this.TaskGuid) + `,`, + `}`, + }, "") + return s +} +func (this *TaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskResponse{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Error", "Error", 1) + `,`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringTaskRequests(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TaskLifecycleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskLifecycleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskLifecycleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DesireTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DesireTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DesireTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TaskDefinition == nil { + m.TaskDefinition = &TaskDefinition{} + } + if err := m.TaskDefinition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShouldStart", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShouldStart = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FailTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FailTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FailTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RejectTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RejectTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RejectTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectionReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectionReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompleteTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompleteTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompleteTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskCallbackResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCallbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCallbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field FailureReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskByGuidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskByGuidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskByGuidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskGuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskGuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTaskRequests + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTaskRequests + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTaskRequests(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTaskRequests + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTaskRequests(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTaskRequests + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTaskRequests + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTaskRequests + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTaskRequests + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTaskRequests = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTaskRequests = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTaskRequests = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/task_requests.proto b/vendor/code.cloudfoundry.org/bbs/models/task_requests.proto new file mode 100644 index 00000000..bf43ad66 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/task_requests.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "task.proto"; +import "error.proto"; + +message TaskLifecycleResponse { + Error error = 1; +} + +message DesireTaskRequest { + TaskDefinition task_definition = 1 [(gogoproto.jsontag) = 
"task_definition"]; + string task_guid = 2 [(gogoproto.jsontag) = "task_guid"]; + string domain = 3 [(gogoproto.jsontag) = "domain"]; +} + +message StartTaskRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message StartTaskResponse { + Error error = 1; + bool should_start = 2 [(gogoproto.jsontag) = "should_start"]; +} + +message FailTaskRequest { + option deprecated = true; + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string failure_reason = 2 [(gogoproto.jsontag) = "failure_reason"]; +} + +message RejectTaskRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string rejection_reason = 2 [(gogoproto.jsontag) = "rejection_reason"]; +} + +message TaskGuidRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; +} + +message CompleteTaskRequest { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; + bool failed = 3 [(gogoproto.jsontag) = "failed"]; + string failure_reason = 4 [(gogoproto.jsontag) = "failure_reason"]; + string result = 5 [(gogoproto.jsontag) = "result"]; +} + +message TaskCallbackResponse { + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; + bool failed = 2 [(gogoproto.jsontag) = "failed"]; + string failure_reason = 3 [(gogoproto.jsontag) = "failure_reason"]; + string result = 4 [(gogoproto.jsontag) = "result"]; + string annotation = 5; + int64 created_at = 6 [(gogoproto.jsontag) = "created_at"]; +} + +message TasksRequest{ + string domain = 1 [(gogoproto.jsontag) = "domain"]; + string cell_id = 2 [(gogoproto.jsontag) = "cell_id"]; +} + +message TasksResponse{ + Error error = 1; + repeated Task tasks = 2; +} + +message TaskByGuidRequest{ + string task_guid = 1 [(gogoproto.jsontag) = "task_guid"]; +} + +message TaskResponse{ + Error error = 1; + Task task = 2; +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/validator.go b/vendor/code.cloudfoundry.org/bbs/models/validator.go new file mode 100644 index 00000000..ff8f1b81 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/validator.go @@ -0,0 +1,58 @@ +package models + +import ( + "bytes" +) + +type ValidationError []error + +func (ve ValidationError) Append(err error) ValidationError { + switch err := err.(type) { + case ValidationError: + return append(ve, err...) 
+ default: + return append(ve, err) + } +} + +func (ve ValidationError) ToError() error { + if len(ve) == 0 { + return nil + } else { + return ve + } +} + +func (ve ValidationError) Error() string { + var buffer bytes.Buffer + + for i, err := range ve { + if err == nil { + continue + } + if i > 0 { + buffer.WriteString(", ") + } + buffer.WriteString(err.Error()) + } + + return buffer.String() +} + +func (ve ValidationError) Empty() bool { + return len(ve) == 0 +} + +type Validator interface { + Validate() error +} + +func (ve ValidationError) Check(validators ...Validator) ValidationError { + for _, v := range validators { + err := v.Validate() + if err != nil { + ve = ve.Append(err) + } + } + return ve +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/version.go b/vendor/code.cloudfoundry.org/bbs/models/version.go new file mode 100644 index 00000000..54c6ad14 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/version.go @@ -0,0 +1,5 @@ +package models + +type Version struct { + CurrentVersion int64 +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/volume_mount.go b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.go new file mode 100644 index 00000000..fff1b251 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.go @@ -0,0 +1,34 @@ +package models + +import ( + "errors" + + "code.cloudfoundry.org/bbs/format" +) + +func (*VolumePlacement) Version() format.Version { + return format.V1 +} + +func (*VolumePlacement) Validate() error { + return nil +} + +func (v *VolumeMount) Validate() error { + var ve ValidationError + if v.Driver == "" { + ve = ve.Append(errors.New("invalid volume_mount driver")) + } + if !(v.Mode == "r" || v.Mode == "rw") { + ve = ve.Append(errors.New("invalid volume_mount mode")) + } + if v.Shared != nil && v.Shared.VolumeId == "" { + ve = ve.Append(errors.New("invalid volume_mount volume id")) + } + + if !ve.Empty() { + return ve + } + + return nil +} diff --git a/vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go new file mode 100644 index 00000000..2193674d --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.pb.go @@ -0,0 +1,1061 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: volume_mount.proto + +package models + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SharedDevice struct { + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id"` + MountConfig string `protobuf:"bytes,2,opt,name=mount_config,json=mountConfig,proto3" json:"mount_config"` +} + +func (m *SharedDevice) Reset() { *m = SharedDevice{} } +func (*SharedDevice) ProtoMessage() {} +func (*SharedDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_bbde336a4634d84f, []int{0} +} +func (m *SharedDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SharedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SharedDevice.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SharedDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_SharedDevice.Merge(m, src) +} +func (m *SharedDevice) XXX_Size() int { + return m.Size() +} +func (m *SharedDevice) XXX_DiscardUnknown() { + xxx_messageInfo_SharedDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_SharedDevice proto.InternalMessageInfo + +func (m *SharedDevice) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *SharedDevice) GetMountConfig() string { + if m != nil { + return m.MountConfig + } + return "" +} + +type VolumeMount struct { + Driver string `protobuf:"bytes,1,opt,name=driver,proto3" json:"driver"` + ContainerDir string `protobuf:"bytes,3,opt,name=container_dir,json=containerDir,proto3" json:"container_dir"` + Mode string `protobuf:"bytes,6,opt,name=mode,proto3" json:"mode"` + // oneof device { + Shared *SharedDevice `protobuf:"bytes,7,opt,name=shared,proto3" json:"shared"` +} + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_bbde336a4634d84f, []int{1} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VolumeMount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeMount) GetDriver() string { + if m != nil { + return m.Driver + } + return "" +} + +func (m *VolumeMount) GetContainerDir() string { + if m != nil { + return m.ContainerDir + } + return "" +} + +func (m *VolumeMount) GetMode() string { + if m != nil { + return m.Mode + } + return "" +} + +func (m *VolumeMount) GetShared() *SharedDevice { + if m != nil { + return m.Shared + } + return nil +} + +type VolumePlacement struct { + DriverNames []string `protobuf:"bytes,1,rep,name=driver_names,json=driverNames,proto3" json:"driver_names"` +} + +func (m *VolumePlacement) Reset() { *m = VolumePlacement{} } +func (*VolumePlacement) ProtoMessage() {} +func (*VolumePlacement) Descriptor() ([]byte, []int) { + return fileDescriptor_bbde336a4634d84f, []int{2} +} +func (m *VolumePlacement) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumePlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VolumePlacement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VolumePlacement) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumePlacement.Merge(m, src) +} +func (m *VolumePlacement) XXX_Size() int { + return m.Size() +} +func (m *VolumePlacement) XXX_DiscardUnknown() { + xxx_messageInfo_VolumePlacement.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumePlacement proto.InternalMessageInfo + +func (m *VolumePlacement) GetDriverNames() []string { + if m != nil { + return m.DriverNames + } + return nil +} + +func init() { + proto.RegisterType((*SharedDevice)(nil), "models.SharedDevice") + proto.RegisterType((*VolumeMount)(nil), "models.VolumeMount") + proto.RegisterType((*VolumePlacement)(nil), "models.VolumePlacement") +} + +func init() { proto.RegisterFile("volume_mount.proto", fileDescriptor_bbde336a4634d84f) } + +var fileDescriptor_bbde336a4634d84f = []byte{ + // 381 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x6a, 0xa3, 0x40, + 0x18, 0xc7, 0x9d, 0xc4, 0xb8, 0x66, 0x4c, 0x58, 0x77, 0xd8, 0x83, 0x2c, 0xcb, 0x18, 0x3c, 0x85, + 0x85, 0x35, 0xd0, 0x94, 0xd2, 0x73, 0x1a, 0x0a, 0x0d, 0xb4, 0x14, 0x0b, 0xbd, 0x8a, 0xd1, 0x89, + 0x19, 0x88, 0x4e, 0x31, 0x9a, 0x73, 0x1f, 0xa1, 0x8f, 0xd1, 0x47, 0xe9, 0x31, 0xd0, 0x4b, 0x4e, + 0xd2, 0x98, 0x4b, 0xf1, 0x94, 0x47, 0x28, 0xce, 0xd8, 0x36, 0xb9, 0x38, 0xf3, 0xfb, 0x7f, 0x7f, + 0x3f, 0xbf, 0xef, 0x2f, 0x44, 0x2b, 0xb6, 0xc8, 0x22, 0xe2, 0x46, 0x2c, 0x8b, 0x53, 0xfb, 0x21, + 0x61, 0x29, 0x43, 0x4a, 0xc4, 0x02, 0xb2, 0x58, 0xfe, 0xf9, 0x1f, 0xd2, 0x74, 0x9e, 0x4d, 0x6d, + 0x9f, 0x45, 0x83, 0x90, 0x85, 0x6c, 0xc0, 0xcb, 0xd3, 0x6c, 0xc6, 0x89, 0x03, 0xbf, 0x89, 0xd7, + 0x2c, 0x06, 0x3b, 0x77, 0x73, 0x2f, 0x21, 0xc1, 0x98, 0xac, 0xa8, 0x4f, 0xd0, 0x3f, 0xd8, 0xae, + 0x9b, 0xd3, 0xc0, 0x00, 0x3d, 0xd0, 0x6f, 0x8f, 0xba, 0x65, 0x6e, 0x7e, 0x8b, 0x8e, 0x2a, 0xae, + 0x57, 0x01, 0x1a, 0xc2, 0x0e, 0x9f, 0xc0, 0xf5, 0x59, 0x3c, 0xa3, 0xa1, 0xd1, 0xe0, 0x76, 0xbd, + 0xcc, 0xcd, 0x23, 0xdd, 0xd1, 0x38, 0x5d, 0x70, 0xb0, 0x5e, 0x01, 0xd4, 0xee, 0x79, 0x87, 0xeb, + 0x4a, 0x45, 0x16, 0x54, 0x82, 0x84, 0xae, 0x48, 0x52, 0x7f, 0x0d, 0x96, 0xb9, 0x59, 0x2b, 0x4e, + 0x7d, 0xa2, 0x33, 0xd8, 0xf5, 0x59, 0x9c, 0x7a, 0x34, 0x26, 0x89, 0x1b, 0xd0, 0xc4, 0x68, 0x72, + 0xeb, 0xaf, 0x32, 0x37, 0x8f, 0x0b, 0x4e, 0xe7, 0x0b, 0xc7, 0x34, 0x41, 0x7f, 0xa1, 0x5c, 0xa5, + 0x62, 0x28, 0xdc, 0xae, 0x96, 0xb9, 0xc9, 0xd9, 0xe1, 0x4f, 0x74, 0x0e, 0x95, 0x25, 0x5f, 0xdd, + 0xf8, 0xd1, 0x03, 0x7d, 0xed, 0xe4, 0xb7, 0x2d, 0x22, 0xb4, 0x0f, 0x03, 0x11, 0xf3, 0x08, 0x9f, + 0x53, 0x9f, 0x13, 0x59, 0x6d, 0xe8, 0xcd, 0x89, 0xac, 0xca, 0x7a, 0x6b, 0x22, 0xab, 0x2d, 0x5d, + 0xb1, 0x2e, 0xe1, 0x4f, 0xb1, 0xd4, 0xed, 0xc2, 0xf3, 0x49, 0x44, 0xe2, 0xb4, 0x4a, 0x47, 0x8c, + 0xef, 0xc6, 0x5e, 0x44, 0x96, 0x06, 0xe8, 0x35, 0x3f, 0xd3, 0x39, 0xd4, 0x1d, 0x4d, 0xd0, 0x4d, + 0x05, 0xa3, 0xd3, 0xf5, 0x16, 0x83, 0xcd, 0x16, 0x4b, 0xfb, 0x2d, 0x06, 0x8f, 0x05, 0x06, 0xcf, + 0x05, 0x06, 0x2f, 0x05, 0x06, 0xeb, 0x02, 0x83, 0xb7, 0x02, 0x83, 0xf7, 0x02, 0x4b, 0xfb, 0x02, + 0x83, 0xa7, 0x1d, 0x96, 0xd6, 0x3b, 0x2c, 0x6d, 0x76, 0x58, 0x9a, 0x2a, 0xfc, 0x5f, 0x0e, 0x3f, + 0x02, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x23, 0x60, 0xde, 
0x18, 0x02, 0x00, 0x00, +} + +func (this *SharedDevice) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SharedDevice) + if !ok { + that2, ok := that.(SharedDevice) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.VolumeId != that1.VolumeId { + return false + } + if this.MountConfig != that1.MountConfig { + return false + } + return true +} +func (this *VolumeMount) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VolumeMount) + if !ok { + that2, ok := that.(VolumeMount) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Driver != that1.Driver { + return false + } + if this.ContainerDir != that1.ContainerDir { + return false + } + if this.Mode != that1.Mode { + return false + } + if !this.Shared.Equal(that1.Shared) { + return false + } + return true +} +func (this *VolumePlacement) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VolumePlacement) + if !ok { + that2, ok := that.(VolumePlacement) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.DriverNames) != len(that1.DriverNames) { + return false + } + for i := range this.DriverNames { + if this.DriverNames[i] != that1.DriverNames[i] { + return false + } + } + return true +} +func (this *SharedDevice) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&models.SharedDevice{") + s = append(s, "VolumeId: "+fmt.Sprintf("%#v", this.VolumeId)+",\n") + s = append(s, "MountConfig: "+fmt.Sprintf("%#v", this.MountConfig)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VolumeMount) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&models.VolumeMount{") + s = append(s, "Driver: "+fmt.Sprintf("%#v", this.Driver)+",\n") + s = append(s, "ContainerDir: "+fmt.Sprintf("%#v", this.ContainerDir)+",\n") + s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n") + if this.Shared != nil { + s = append(s, "Shared: "+fmt.Sprintf("%#v", this.Shared)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VolumePlacement) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&models.VolumePlacement{") + s = append(s, "DriverNames: "+fmt.Sprintf("%#v", this.DriverNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringVolumeMount(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SharedDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SharedDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SharedDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MountConfig) > 0 { + i -= len(m.MountConfig) + 
copy(dAtA[i:], m.MountConfig) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.MountConfig))) + i-- + dAtA[i] = 0x12 + } + if len(m.VolumeId) > 0 { + i -= len(m.VolumeId) + copy(dAtA[i:], m.VolumeId) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.VolumeId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Shared != nil { + { + size, err := m.Shared.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVolumeMount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Mode) > 0 { + i -= len(m.Mode) + copy(dAtA[i:], m.Mode) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.Mode))) + i-- + dAtA[i] = 0x32 + } + if len(m.ContainerDir) > 0 { + i -= len(m.ContainerDir) + copy(dAtA[i:], m.ContainerDir) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.ContainerDir))) + i-- + dAtA[i] = 0x1a + } + if len(m.Driver) > 0 { + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumePlacement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumePlacement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumePlacement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DriverNames) > 0 { + for iNdEx := len(m.DriverNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DriverNames[iNdEx]) + copy(dAtA[i:], m.DriverNames[iNdEx]) + i = encodeVarintVolumeMount(dAtA, i, uint64(len(m.DriverNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintVolumeMount(dAtA []byte, offset int, v uint64) int { + offset -= sovVolumeMount(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SharedDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeId) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + l = len(m.MountConfig) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + return n +} + +func (m *VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + l = len(m.ContainerDir) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + l = len(m.Mode) + if l > 0 { + n += 1 + l + sovVolumeMount(uint64(l)) + } + if m.Shared != nil { + l = m.Shared.Size() + n += 1 + l + sovVolumeMount(uint64(l)) + } + return n +} + +func (m *VolumePlacement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DriverNames) > 0 { + for _, s := range m.DriverNames { + l = len(s) + n += 1 + l + sovVolumeMount(uint64(l)) + } + } + return n +} + +func sovVolumeMount(x uint64) (n int) 
{ + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVolumeMount(x uint64) (n int) { + return sovVolumeMount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SharedDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SharedDevice{`, + `VolumeId:` + fmt.Sprintf("%v", this.VolumeId) + `,`, + `MountConfig:` + fmt.Sprintf("%v", this.MountConfig) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ContainerDir:` + fmt.Sprintf("%v", this.ContainerDir) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Shared:` + strings.Replace(this.Shared.String(), "SharedDevice", "SharedDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumePlacement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumePlacement{`, + `DriverNames:` + fmt.Sprintf("%v", this.DriverNames) + `,`, + `}`, + }, "") + return s +} +func valueToStringVolumeMount(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SharedDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SharedDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SharedDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVolumeMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthVolumeMount 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shared == nil { + m.Shared = 
&SharedDevice{} + } + if err := m.Shared.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVolumeMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthVolumeMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumePlacement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumePlacement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumePlacement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVolumeMount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVolumeMount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverNames = append(m.DriverNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVolumeMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthVolumeMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVolumeMount(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVolumeMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVolumeMount + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupVolumeMount + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthVolumeMount + } 
+ if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthVolumeMount = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVolumeMount = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupVolumeMount = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto new file mode 100644 index 00000000..3139b87e --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/models/volume_mount.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package models; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_enum_prefix_all) = true; + +message SharedDevice { + string volume_id = 1 [(gogoproto.jsontag) = "volume_id"]; + string mount_config = 2 [(gogoproto.jsontag) = "mount_config"]; +} + +message VolumeMount { + reserved 2, 4, 5; + + string driver = 1 [(gogoproto.jsontag) = "driver"]; + string container_dir = 3 [(gogoproto.jsontag) = "container_dir"]; + string mode = 6 [(gogoproto.jsontag) = "mode"]; + + // oneof device { + SharedDevice shared = 7 [(gogoproto.jsontag) = "shared"]; + // } +} + +message VolumePlacement { + repeated string driver_names = 1 [(gogoproto.jsontag) = "driver_names"]; +} diff --git a/vendor/code.cloudfoundry.org/bbs/package.go b/vendor/code.cloudfoundry.org/bbs/package.go new file mode 100644 index 00000000..0d460353 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/package.go @@ -0,0 +1 @@ +package bbs // import "code.cloudfoundry.org/bbs" diff --git a/vendor/code.cloudfoundry.org/bbs/routes.go b/vendor/code.cloudfoundry.org/bbs/routes.go new file mode 100644 index 00000000..9edc913a --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/routes.go @@ -0,0 +1,162 @@ +package bbs + +import "github.com/tedsuo/rata" + +const ( + // Ping + PingRoute_r0 = "Ping" + + // Domains + DomainsRoute_r0 = "Domains" + UpsertDomainRoute_r0 = "UpsertDomain" + + // Actual LRPs + ActualLRPsRoute_r0 = "ActualLRPs" + // Deprecated: use the ActualLRPInstances API instead + ActualLRPGroupsRoute_r0 = "ActualLRPGroups" + // Deprecated: use the ActualLRPInstances API instead + ActualLRPGroupsByProcessGuidRoute_r0 = "ActualLRPGroupsByProcessGuid" + // Deprecated: use the ActualLRPInstances API instead + ActualLRPGroupByProcessGuidAndIndexRoute_r0 = "ActualLRPGroupsByProcessGuidAndIndex" + + // Actual LRP Lifecycle + ClaimActualLRPRoute_r0 = "ClaimActualLRP" + StartActualLRPRoute_r1 = "StartActualLRP" + // Deprecated: use StartActualLRPRoute_r1 instead + StartActualLRPRoute_r0 = "StartActualLRP_r0" + CrashActualLRPRoute_r0 = "CrashActualLRP" + FailActualLRPRoute_r0 = "FailActualLRP" + RemoveActualLRPRoute_r0 = "RemoveActualLRP" + RetireActualLRPRoute_r0 = "RetireActualLRP" + + // Evacuation + RemoveEvacuatingActualLRPRoute_r0 = "RemoveEvacuatingActualLRP" + EvacuateClaimedActualLRPRoute_r0 = "EvacuateClaimedActualLRP" + EvacuateCrashedActualLRPRoute_r0 = "EvacuateCrashedActualLRP" + EvacuateStoppedActualLRPRoute_r0 = "EvacuateStoppedActualLRP" + EvacuateRunningActualLRPRoute_r1 = "EvacuateRunningActualLRP" + // Deprecated: use EvacuateRunningActualLRPRoute_r1 instead + EvacuateRunningActualLRPRoute_r0 = "EvacuateRunningActualLRP_r0" + + // Desired LRPs + DesiredLRPsRoute_r3 = "DesiredLRPs" + DesiredLRPSchedulingInfosRoute_r0 = "DesiredLRPSchedulingInfos" + DesiredLRPSchedulingInfoByProcessGuid_r0 = "DesiredLRPSchedulingInfoByProcessGuid" +
DesiredLRPRoutingInfosRoute_r0 = "DesiredLRPRoutingInfos" + DesiredLRPByProcessGuidRoute_r3 = "DesiredLRPByProcessGuid" + // Deprecated: use DesiredLRPsRoute_r3 instead + DesiredLRPsRoute_r2 = "DesiredLRPs_r2" + // Deprecated: use DesiredLRPByProcessGuidRoute_r3 instead + DesiredLRPByProcessGuidRoute_r2 = "DesiredLRPByProcessGuid_r2" + + // Desire LRP Lifecycle + DesireDesiredLRPRoute_r2 = "DesireDesiredLRP" + UpdateDesiredLRPRoute_r0 = "UpdateDesireLRP" + RemoveDesiredLRPRoute_r0 = "RemoveDesiredLRP" + + // Tasks + TasksRoute_r3 = "Tasks" + TaskByGuidRoute_r3 = "TaskByGuid" + DesireTaskRoute_r2 = "DesireTask" + StartTaskRoute_r0 = "StartTask" + CancelTaskRoute_r0 = "CancelTask" + // Deprecated: use CancelTaskRoute_r0 instead + FailTaskRoute_r0 = "FailTask" + RejectTaskRoute_r0 = "RejectTask" + CompleteTaskRoute_r0 = "CompleteTask" + ResolvingTaskRoute_r0 = "ResolvingTask" + DeleteTaskRoute_r0 = "DeleteTask" + // Deprecated: use TasksRoute_r3 instead + TasksRoute_r2 = "Tasks_r2" + // Deprecated: use TaskByGuidRoute_r3 instead + TaskByGuidRoute_r2 = "TaskByGuid_r2" + + // Event Streaming + // Deprecated: use LRPInstanceEventStreamRoute_r1 instead + LRPGroupEventStreamRoute_r1 = "EventStream" + TaskEventStreamRoute_r1 = "TaskEventStream" + LRPInstanceEventStreamRoute_r1 = "LRPInstanceEventStream" + // Deprecated: use LRPInstanceEventStreamRoute_r1 instead + EventStreamRoute_r0 = "EventStream_r0" + // Deprecated: use TaskEventStreamRoute_r1 instead + TaskEventStreamRoute_r0 = "TaskEventStream_r0" + // Deprecated: use LRPInstanceEventStreamRoute_r1 instead + LrpInstanceEventStreamRoute_r0 = "LrpInstanceEventStream_r0" + + // Cell Presence + CellsRoute_r0 = "Cells" +) + +var Routes = rata.Routes{ + // Ping + {Path: "/v1/ping", Method: "POST", Name: PingRoute_r0}, + + // Domains + {Path: "/v1/domains/list", Method: "POST", Name: DomainsRoute_r0}, + {Path: "/v1/domains/upsert", Method: "POST", Name: UpsertDomainRoute_r0}, + + // Actual LRPs + {Path: "/v1/actual_lrps/list", Method: "POST", Name: ActualLRPsRoute_r0}, + {Path: "/v1/actual_lrp_groups/list", Method: "POST", Name: ActualLRPGroupsRoute_r0}, // DEPRECATED + {Path: "/v1/actual_lrp_groups/list_by_process_guid", Method: "POST", Name: ActualLRPGroupsByProcessGuidRoute_r0}, // DEPRECATED + {Path: "/v1/actual_lrp_groups/get_by_process_guid_and_index", Method: "POST", Name: ActualLRPGroupByProcessGuidAndIndexRoute_r0}, // DEPRECATED + + // Actual LRP Lifecycle + {Path: "/v1/actual_lrps/claim", Method: "POST", Name: ClaimActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/start.r1", Method: "POST", Name: StartActualLRPRoute_r1}, + {Path: "/v1/actual_lrps/start", Method: "POST", Name: StartActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/crash", Method: "POST", Name: CrashActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/fail", Method: "POST", Name: FailActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/remove", Method: "POST", Name: RemoveActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/retire", Method: "POST", Name: RetireActualLRPRoute_r0}, + + // Evacuation + {Path: "/v1/actual_lrps/remove_evacuating", Method: "POST", Name: RemoveEvacuatingActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_claimed", Method: "POST", Name: EvacuateClaimedActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_crashed", Method: "POST", Name: EvacuateCrashedActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_stopped", Method: "POST", Name: EvacuateStoppedActualLRPRoute_r0}, + {Path: "/v1/actual_lrps/evacuate_running.r1", Method: "POST", Name: EvacuateRunningActualLRPRoute_r1}, +
{Path: "/v1/actual_lrps/evacuate_running", Method: "POST", Name: EvacuateRunningActualLRPRoute_r0}, + + // Desired LRPs + {Path: "/v1/desired_lrp_scheduling_infos/list", Method: "POST", Name: DesiredLRPSchedulingInfosRoute_r0}, + {Path: "/v1/desired_lrp_scheduling_infos/get_by_process_guid", Method: "POST", Name: DesiredLRPSchedulingInfoByProcessGuid_r0}, + {Path: "/v1/desired_lrp_routing_infos/list", Method: "POST", Name: DesiredLRPRoutingInfosRoute_r0}, + + {Path: "/v1/desired_lrps/list.r3", Method: "POST", Name: DesiredLRPsRoute_r3}, + {Path: "/v1/desired_lrps/get_by_process_guid.r3", Method: "POST", Name: DesiredLRPByProcessGuidRoute_r3}, + {Path: "/v1/desired_lrps/list.r2", Method: "POST", Name: DesiredLRPsRoute_r2}, // DEPRECATED + {Path: "/v1/desired_lrps/get_by_process_guid.r2", Method: "POST", Name: DesiredLRPByProcessGuidRoute_r2}, // DEPRECATED + + // Desire LPR Lifecycle + {Path: "/v1/desired_lrp/desire.r2", Method: "POST", Name: DesireDesiredLRPRoute_r2}, + {Path: "/v1/desired_lrp/update", Method: "POST", Name: UpdateDesiredLRPRoute_r0}, + {Path: "/v1/desired_lrp/remove", Method: "POST", Name: RemoveDesiredLRPRoute_r0}, + + // Tasks + {Path: "/v1/tasks/list.r3", Method: "POST", Name: TasksRoute_r3}, + {Path: "/v1/tasks/get_by_task_guid.r3", Method: "POST", Name: TaskByGuidRoute_r3}, + {Path: "/v1/tasks/list.r2", Method: "POST", Name: TasksRoute_r2}, // DEPRECATED + {Path: "/v1/tasks/get_by_task_guid.r2", Method: "POST", Name: TaskByGuidRoute_r2}, // DEPRECATED + + // Task Lifecycle + {Path: "/v1/tasks/desire.r2", Method: "POST", Name: DesireTaskRoute_r2}, + {Path: "/v1/tasks/start", Method: "POST", Name: StartTaskRoute_r0}, + {Path: "/v1/tasks/cancel", Method: "POST", Name: CancelTaskRoute_r0}, + {Path: "/v1/tasks/fail", Method: "POST", Name: FailTaskRoute_r0}, // DEPRECATED + {Path: "/v1/tasks/reject", Method: "POST", Name: RejectTaskRoute_r0}, + {Path: "/v1/tasks/complete", Method: "POST", Name: CompleteTaskRoute_r0}, + {Path: "/v1/tasks/resolving", Method: "POST", Name: ResolvingTaskRoute_r0}, + {Path: "/v1/tasks/delete", Method: "POST", Name: DeleteTaskRoute_r0}, + + // Event Streaming + {Path: "/v1/events.r1", Method: "GET", Name: LRPGroupEventStreamRoute_r1}, // DEPRECATED + {Path: "/v1/events/tasks.r1", Method: "POST", Name: TaskEventStreamRoute_r1}, + {Path: "/v1/events/lrp_instances.r1", Method: "POST", Name: LRPInstanceEventStreamRoute_r1}, + {Path: "/v1/events", Method: "GET", Name: EventStreamRoute_r0}, // DEPRECATED + {Path: "/v1/events/tasks", Method: "POST", Name: TaskEventStreamRoute_r0}, // DEPRECATED + {Path: "/v1/events/lrp_instances", Method: "POST", Name: LrpInstanceEventStreamRoute_r0}, // DEPRECATED + + // Cells + {Path: "/v1/cells/list.r1", Method: "POST", Name: CellsRoute_r0}, +} diff --git a/vendor/code.cloudfoundry.org/bbs/trace/request_id.go b/vendor/code.cloudfoundry.org/bbs/trace/request_id.go new file mode 100644 index 00000000..3eff1ee5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/bbs/trace/request_id.go @@ -0,0 +1,53 @@ +package trace + +import ( + "context" + "net/http" + "strings" + + "code.cloudfoundry.org/lager/v3" + "github.com/openzipkin/zipkin-go/idgenerator" + "github.com/openzipkin/zipkin-go/model" +) + +const ( + RequestIdHeader = "X-Vcap-Request-Id" +) + +type RequestIdHeaderCtxKeyType struct{} + +var RequestIdHeaderCtxKey = RequestIdHeaderCtxKeyType{} + +func ContextWithRequestId(req *http.Request) context.Context { + return context.WithValue(req.Context(), RequestIdHeaderCtxKey, RequestIdFromRequest(req)) +} + +func 
RequestIdFromContext(ctx context.Context) string { + if val, ok := ctx.Value(RequestIdHeaderCtxKey).(string); ok { + return val + } + + return "" +} + +func RequestIdFromRequest(req *http.Request) string { + return req.Header.Get(RequestIdHeader) +} + +func LoggerWithTraceInfo(logger lager.Logger, traceIDStr string) lager.Logger { + if traceIDStr == "" { + return logger.WithData(nil) + } + traceHex := strings.Replace(traceIDStr, "-", "", -1) + traceID, err := model.TraceIDFromHex(traceHex) + if err != nil { + return logger.WithData(nil) + } + + spanID := idgenerator.NewRandom128().SpanID(model.TraceID{}) + return logger.WithData(lager.Data{"trace-id": traceID.String(), "span-id": spanID.String()}) +} + +func GenerateTraceID() string { + return idgenerator.NewRandom128().TraceID().String() +} diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore b/vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore new file mode 100644 index 00000000..9ed3b07c --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS b/vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS new file mode 100644 index 00000000..6a633c7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/CODEOWNERS @@ -0,0 +1 @@ +* @cloudfoundry/wg-app-runtime-platform-diego-approvers diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE b/vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE b/vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE new file mode 100644 index 00000000..3c8dd5b6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/README.md b/vendor/code.cloudfoundry.org/cfhttp/v2/README.md new file mode 100644 index 00000000..a3a30305 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/README.md @@ -0,0 +1,30 @@ +# cfhttp + +[![Go Report +Card](https://goreportcard.com/badge/code.cloudfoundry.org/cfhttp)](https://goreportcard.com/report/code.cloudfoundry.org/cfhttp) +[![Go +Reference](https://pkg.go.dev/badge/code.cloudfoundry.org/cfhttp.svg)](https://pkg.go.dev/code.cloudfoundry.org/cfhttp) + +Wrapper for official go http package + +> \[!NOTE\] +> +> This repository should be imported as +> `code.cloudfoundry.org/cfhttp/v2`. + +# Contributing + +See the [Contributing.md](./.github/CONTRIBUTING.md) for more +information on how to contribute. + +# Working Group Charter + +This repository is maintained by [App Runtime +Platform](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/app-runtime-platform.md) +under `Diego` area. + +> \[!IMPORTANT\] +> +> Content in this file is managed by the [CI task +> `sync-readme`](https://github.com/cloudfoundry/wg-app-platform-runtime-ci/blob/c83c224ad06515ed52f51bdadf6075f56300ec93/shared/tasks/sync-readme/metadata.yml) +> and is generated by CI following a convention. diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/client.go b/vendor/code.cloudfoundry.org/cfhttp/v2/client.go new file mode 100644 index 00000000..22e6d2c5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/client.go @@ -0,0 +1,129 @@ +// Package cfhttp provides defaults and helpers for building http clients. +// It serves to help maintain the same HTTP configuration across multiple +// CloudFoundry components. +package cfhttp + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type config struct { + requestTimeout time.Duration + dialTimeout time.Duration + tcpKeepAliveTimeout time.Duration + idleConnTimeout time.Duration + disableKeepAlives bool + maxIdleConnsPerHost int + tlsConfig *tls.Config +} + +// Option can be used to configure different parts of the HTTP client, including +// its internal transport or the connection dialer. +type Option func(*config) + +// WithStreamingDefaults modifies the HTTP client with defaults that are more +// suitable for consuming server-sent events on persistent connections. +func WithStreamingDefaults() Option { + return func(c *config) { + c.tcpKeepAliveTimeout = 30 * time.Second + c.disableKeepAlives = false + c.requestTimeout = 0 + } +} + +// WithRequestTimeout sets the total time limit for requests made by this Client. +// +// A setting of 0 means no timeout. +func WithRequestTimeout(t time.Duration) Option { + return func(c *config) { + c.requestTimeout = t + } +} + +// WithDialTimeout sets the time limit for connecting to the remote address. This +// includes DNS resolution and retries on multiple IP addresses. +// +// A setting of 0 means no timeout. +func WithDialTimeout(t time.Duration) Option { + return func(c *config) { + c.dialTimeout = t + } +} + +// WithTCPKeepAliveTimeout sets the keep-alive period for an active TCP +// connection. +// +// A setting of 0 disables TCP keep-alives. 
+func WithTCPKeepAliveTimeout(t time.Duration) Option { + return func(c *config) { + c.tcpKeepAliveTimeout = t + } +} + +// WithIdleConnTimeout sets the maximum amount of time a keep-alive +// connection can be idle before it closes itself. +// +// A setting of 0 means no timeout. +func WithIdleConnTimeout(t time.Duration) Option { + return func(c *config) { + c.idleConnTimeout = t + } +} + +// WithDisableKeepAlives disables keep-alive on every HTTP connection so that +// every connection is closed as soon as its request is done. +func WithDisableKeepAlives() Option { + return func(c *config) { + c.disableKeepAlives = true + } +} + +// WithMaxIdleConnsPerHost sets the maximum number of keep-alive connections that +// can be active at a time per remote host. +// +// A setting of 0 means the MaxIdleConnsPerHost is +// http.DefaultMaxIdleConnsPerHost (2 at the time of writing). +func WithMaxIdleConnsPerHost(max int) Option { + return func(c *config) { + c.maxIdleConnsPerHost = max + } +} + +// WithTLSConfig sets the TLS configuration on the HTTP client. +func WithTLSConfig(t *tls.Config) Option { + return func(c *config) { + c.tlsConfig = t + } +} + +// NewClient builds an HTTP client with suitable defaults. +// The Options can optionally set configuration options on the +// HTTP client, transport, or net dialer. Options are applied +// in the order that they are passed in, so it is possible for +// later Options to override previous ones. +func NewClient(options ...Option) *http.Client { + cfg := config{ + dialTimeout: 5 * time.Second, + tcpKeepAliveTimeout: 0, + idleConnTimeout: 90 * time.Second, + } + for _, v := range options { + v(&cfg) + } + return &http.Client{ + Transport: &http.Transport{ + DialContext: (&net.Dialer{ + Timeout: cfg.dialTimeout, + KeepAlive: cfg.tcpKeepAliveTimeout, + }).DialContext, + IdleConnTimeout: cfg.idleConnTimeout, + DisableKeepAlives: cfg.disableKeepAlives, + MaxIdleConnsPerHost: cfg.maxIdleConnsPerHost, + TLSClientConfig: cfg.tlsConfig, + }, + Timeout: cfg.requestTimeout, + } +} diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/package.go b/vendor/code.cloudfoundry.org/cfhttp/v2/package.go new file mode 100644 index 00000000..13638153 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/package.go @@ -0,0 +1 @@ +package cfhttp // import "code.cloudfoundry.org/cfhttp/v2" diff --git a/vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf b/vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf new file mode 100644 index 00000000..eba7af74 --- /dev/null +++ b/vendor/code.cloudfoundry.org/cfhttp/v2/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1008","-ST1005","-ST1001","-ST1012","-ST1000","-ST1003","-ST1016","-ST1020","-ST1021","-ST1022"] diff --git a/vendor/code.cloudfoundry.org/lager/v3/.gitignore b/vendor/code.cloudfoundry.org/lager/v3/.gitignore new file mode 100644 index 00000000..bc1e5082 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/.gitignore @@ -0,0 +1,38 @@ +# Builds +bin + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# IntelliJ +.idea + +# Dependencies +vendor + +# macOS +.DS_Store + +# Vim files +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] +Session.vim +Sessionx.vim +.netrwhist +*~ +tags +[._]*.un~ + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS 
b/vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS new file mode 100644 index 00000000..6a633c7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/CODEOWNERS @@ -0,0 +1 @@ +* @cloudfoundry/wg-app-runtime-platform-diego-approvers diff --git a/vendor/code.cloudfoundry.org/lager/v3/LICENSE b/vendor/code.cloudfoundry.org/lager/v3/LICENSE new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/lager/v3/NOTICE b/vendor/code.cloudfoundry.org/lager/v3/NOTICE new file mode 100644 index 00000000..3c8dd5b6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. diff --git a/vendor/code.cloudfoundry.org/lager/v3/README.md b/vendor/code.cloudfoundry.org/lager/v3/README.md new file mode 100644 index 00000000..9a4248ad --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/README.md @@ -0,0 +1,102 @@ +lager +===== + +**Note**: This repository should be imported as `code.cloudfoundry.org/lager`. + +Lager is a logging library for go. + +## Usage + +Instantiate a logger with the name of your component. + +```go +import ( + "code.cloudfoundry.org/lager/v3" +) + +logger := lager.NewLogger("my-app") +``` + +### Lager and [`log/slog`](https://pkg.go.dev/log/slog) +Lager was written long before Go 1.21 introduced structured logging in the standard library. +There are some wrapper functions for interoperability between Lager and `slog`, +which are only available when using Go 1.21 and higher. + +Lager can be used as an [`slog.Handler`](https://pkg.go.dev/log/slog#Handler) using the `NewHandler()` function: + +```go +func codeThatAcceptsSlog(l *slog.Logger) { ... } + +lagerLogger := lager.NewLogger("my-lager-logger") + +codeThatAcceptsSlog(slog.New(lager.NewHandler(lagerLogger))) +``` + +An `slog.Logger` can be used as a Lager `Sink` using the `NewSlogSink()` function: +```go +var *slog.Logger l = codeThatReturnsSlog() + +lagerLogger := lager.NewLogger("my-lager-logger") + +lagerLogger.RegisterSink(lager.NewSlogSink(l)) +``` + +### Sinks + +Lager can write logs to a variety of destinations. 
You can specify the destinations +using Lager sinks: + +To write to an arbitrary `Writer` object: + +```go +logger.RegisterSink(lager.NewWriterSink(myWriter, lager.INFO)) +``` + +### Emitting logs + +Lager supports the usual level-based logging, with an optional argument for arbitrary key-value data. + +```go +logger.Info("doing-stuff", lager.Data{ + "informative": true, +}) +``` + +output: +```json +{ "source": "my-app", "message": "doing-stuff", "data": { "informative": true }, "timestamp": 1232345, "log_level": 1 } +``` + +Error messages also take an `Error` object: + +```go +logger.Error("failed-to-do-stuff", errors.New("Something went wrong")) +``` + +output: +```json +{ "source": "my-app", "message": "failed-to-do-stuff", "data": { "error": "Something went wrong" }, "timestamp": 1232345, "log_level": 1 } +``` + +### Sessions + +You can avoid repetition of contextual data using 'Sessions': + +```go + +contextualLogger := logger.Session("my-task", lager.Data{ + "request-id": 5, +}) + +contextualLogger.Info("my-action") +``` + +output: + +```json +{ "source": "my-app", "message": "my-task.my-action", "data": { "request-id": 5 }, "timestamp": 1232345, "log_level": 1 } +``` + +## License + +Lager is [Apache 2.0](https://github.com/cloudfoundry/lager/blob/master/LICENSE) licensed. diff --git a/vendor/code.cloudfoundry.org/lager/v3/handler.go b/vendor/code.cloudfoundry.org/lager/v3/handler.go new file mode 100644 index 00000000..2cdaf7c3 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/handler.go @@ -0,0 +1,162 @@ +//go:build go1.21 + +package lager + +import ( + "context" + "fmt" + "log/slog" +) + +// NewHandler wraps the logger as a slog.Handler +// The supplied Logger must be a lager.logger +// type created by lager.NewLogger(), otherwise +// it panics. +// +// Note the following log level conversions: +// +// slog.LevelDebug -> lager.DEBUG +// slog.LevelError -> lager.ERROR +// slog.LevelError -> lager.FATAL +// default -> lager.INFO +func NewHandler(l Logger) slog.Handler { + switch ll := l.(type) { + case *logger: + return &handler{logger: ll} + default: + panic("lager.Logger must be an instance of lager.logger") + } +} + +// Type decorator is used to decorate the attributes with groups and more attributes +type decorator func(map[string]any) map[string]any + +// Type handler is a slog.Handler that wraps a lager logger. +// It uses the logger concrete type rather than the Logger interface +// because it uses methods not available on the interface. 
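Taken together with the README excerpt above, a minimal wiring sketch may help; this is an editorial illustration rather than part of the vendored file, and the component name, sink choice, and log fields are invented:

```go
package main

import (
	"log/slog"
	"os"

	"code.cloudfoundry.org/lager/v3"
)

func main() {
	// A lager logger writing JSON lines to stdout at DEBUG and above.
	logger := lager.NewLogger("bbs-fetcher")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	// Expose the lager logger to slog-based callers. Per toLogLevel below,
	// Debug maps to lager.DEBUG, Warn and Error to lager.ERROR, the rest to INFO.
	slogger := slog.New(lager.NewHandler(logger))
	slogger.Debug("polling-bbs", "interval", "30s")
	slogger.Warn("slow-response", "elapsed-ms", 1200)
}
```

The handler that makes this possible is defined next.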
+type handler struct { + logger *logger + decorators []decorator +} + +// Enabled always returns true +func (h *handler) Enabled(_ context.Context, _ slog.Level) bool { + return true +} + +// Handle converts a slog.Record into a lager.LogFormat and passes it to every Sink +func (h *handler) Handle(_ context.Context, r slog.Record) error { + log := LogFormat{ + time: r.Time, + Timestamp: formatTimestamp(r.Time), + Source: h.logger.component, + Message: fmt.Sprintf("%s.%s", h.logger.task, r.Message), + LogLevel: toLogLevel(r.Level), + Data: h.logger.baseData(h.decorate(attrFromRecord(r))), + } + + for _, sink := range h.logger.sinks { + sink.Log(log) + } + + return nil +} + +// WithAttrs returns a new slog.Handler which always adds the specified attributes +func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &handler{ + logger: h.logger, + decorators: append(h.decorators, attrDecorator(attrs)), + } +} + +// WithGroup returns a new slog.Handler which always logs attributes in the specified group +func (h *handler) WithGroup(name string) slog.Handler { + return &handler{ + logger: h.logger, + decorators: append(h.decorators, groupDecorator(name)), + } +} + +// decorate will decorate a body using the decorators that have been defined +func (h *handler) decorate(body map[string]any) map[string]any { + for i := len(h.decorators) - 1; i >= 0; i-- { // reverse iteration + body = h.decorators[i](body) + } + return body +} + +// attrDecorator returns a decorator for the specified attributes +func attrDecorator(attrs []slog.Attr) decorator { + return func(body map[string]any) map[string]any { + if body == nil { + body = make(map[string]any) + } + processAttrs(attrs, body) + return body + } +} + +// groupDecorator returns a decorator for the specified group name +func groupDecorator(group string) decorator { + return func(body map[string]any) map[string]any { + switch len(body) { + case 0: + return nil + default: + return map[string]any{group: body} + } + } +} + +// attrFromRecord extracts and processes the attributes from a record +func attrFromRecord(r slog.Record) map[string]any { + if r.NumAttrs() == 0 { + return nil + } + + body := make(map[string]any, r.NumAttrs()) + r.Attrs(func(attr slog.Attr) bool { + processAttr(attr, body) + return true + }) + + return body +} + +// processAttrs calls processAttr() for each attribute +func processAttrs(attrs []slog.Attr, target map[string]any) { + for _, attr := range attrs { + processAttr(attr, target) + } +} + +// processAttr adds the attribute to the target with appropriate transformations +func processAttr(attr slog.Attr, target map[string]any) { + rv := attr.Value.Resolve() + + switch { + case rv.Kind() == slog.KindGroup && attr.Key != "": + nt := make(map[string]any) + processAttrs(attr.Value.Group(), nt) + target[attr.Key] = nt + case rv.Kind() == slog.KindGroup && attr.Key == "": + processAttrs(attr.Value.Group(), target) + case attr.Key == "": + // skip + default: + target[attr.Key] = rv.Any() + } +} + +// toLogLevel converts from slog levels to lager levels +func toLogLevel(l slog.Level) LogLevel { + switch l { + case slog.LevelDebug: + return DEBUG + case slog.LevelError, slog.LevelWarn: + return ERROR + default: + return INFO + } +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go new file mode 100644 index 00000000..c34b9ade --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/package.go @@ -0,0 +1 @@ 
+package truncate // import "code.cloudfoundry.org/lager/v3/internal/truncate" diff --git a/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go new file mode 100644 index 00000000..f4fda22d --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/internal/truncate/truncate.go @@ -0,0 +1,174 @@ +package truncate + +import ( + "reflect" +) + +// Value recursively walks through the value provided by `v` and truncates +// any strings longer than `maxLength`. +// Example: +// type foobar struct{A string; B string} +// truncate.Value(foobar{A:"foo",B:"bar"}, 20) == foobar{A:"foo",B:"bar"} +// truncate.Value(foobar{A:strings.Repeat("a", 25),B:"bar"}, 20) == foobar{A:"aaaaaaaa-(truncated)",B:"bar"} +func Value(v interface{}, maxLength int) interface{} { + rv := reflect.ValueOf(v) + tv := truncateValue(rv, maxLength) + if rv != tv { + return tv.Interface() + } + return v +} + +func truncateValue(rv reflect.Value, maxLength int) reflect.Value { + if maxLength <= 0 { + return rv + } + + switch rv.Kind() { + case reflect.Interface: + return truncateInterface(rv, maxLength) + case reflect.Ptr: + return truncatePtr(rv, maxLength) + case reflect.Struct: + return truncateStruct(rv, maxLength) + case reflect.Map: + return truncateMap(rv, maxLength) + case reflect.Array: + return truncateArray(rv, maxLength) + case reflect.Slice: + return truncateSlice(rv, maxLength) + case reflect.String: + return truncateString(rv, maxLength) + } + return rv +} + +func truncateInterface(rv reflect.Value, maxLength int) reflect.Value { + tv := truncateValue(rv.Elem(), maxLength) + if tv != rv.Elem() { + return tv + } + return rv +} + +func truncatePtr(rv reflect.Value, maxLength int) reflect.Value { + tv := truncateValue(rv.Elem(), maxLength) + if rv.Elem() != tv { + tvp := reflect.New(rv.Elem().Type()) + tvp.Elem().Set(tv) + return tvp + } + return rv +} + +func truncateStruct(rv reflect.Value, maxLength int) reflect.Value { + numFields := rv.NumField() + fields := make([]reflect.Value, numFields) + changed := false + for i := 0; i < numFields; i++ { + fv := rv.Field(i) + tv := truncateValue(fv, maxLength) + if fv != tv { + changed = true + } + fields[i] = tv + } + if changed { + nv := reflect.New(rv.Type()).Elem() + for i, fv := range fields { + nv.Field(i).Set(fv) + } + return nv + } + return rv +} + +func truncateMap(rv reflect.Value, maxLength int) reflect.Value { + keys := rv.MapKeys() + truncatedMap := make(map[reflect.Value]reflect.Value) + changed := false + for _, key := range keys { + mapV := rv.MapIndex(key) + tv := truncateValue(mapV, maxLength) + if mapV != tv { + changed = true + } + truncatedMap[key] = tv + } + if changed { + nv := reflect.MakeMap(rv.Type()) + for k, v := range truncatedMap { + nv.SetMapIndex(k, v) + } + return nv + } + return rv + +} + +func truncateArray(rv reflect.Value, maxLength int) reflect.Value { + return truncateList(rv, maxLength, func(size int) reflect.Value { + arrayType := reflect.ArrayOf(size, rv.Index(0).Type()) + return reflect.New(arrayType).Elem() + }) +} + +func truncateSlice(rv reflect.Value, maxLength int) reflect.Value { + return truncateList(rv, maxLength, func(size int) reflect.Value { + return reflect.MakeSlice(rv.Type(), size, size) + }) +} + +func truncateList(rv reflect.Value, maxLength int, newList func(size int) reflect.Value) reflect.Value { + size := rv.Len() + truncatedValues := make([]reflect.Value, size) + changed := false + for i := 0; i < size; i++ { + elemV := 
rv.Index(i) + tv := truncateValue(elemV, maxLength) + if elemV != tv { + changed = true + } + truncatedValues[i] = tv + } + if changed { + nv := newList(size) + for i, v := range truncatedValues { + nv.Index(i).Set(v) + } + return nv + } + return rv +} + +func truncateString(rv reflect.Value, maxLength int) reflect.Value { + s := String(rv.String(), maxLength) + if s != rv.String() { + return reflect.ValueOf(s) + } + return rv + +} + +const truncated = "-(truncated)" +const lenTruncated = len(truncated) + +// String truncates long strings from the middle, but leaves strings shorter +// than `maxLength` untouched. +// If the string is shorter than the string "-(truncated)" and the string +// exceeds `maxLength`, the output will not be truncated. +// Example: +// truncate.String(strings.Repeat("a", 25), 20) == "aaaaaaaa-(truncated)" +// truncate.String("foobar", 20) == "foobar" +// truncate.String("foobar", 5) == "foobar" +func String(s string, maxLength int) string { + if maxLength <= 0 || len(s) < lenTruncated || len(s) <= maxLength { + return s + } + + strBytes := []byte(s) + truncatedBytes := []byte(truncated) + prefixLength := maxLength - lenTruncated + prefix := strBytes[0:prefixLength] + return string(append(prefix, truncatedBytes...)) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/json_redacter.go b/vendor/code.cloudfoundry.org/lager/v3/json_redacter.go new file mode 100644 index 00000000..a0901480 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/json_redacter.go @@ -0,0 +1,115 @@ +package lager + +import ( + "encoding/json" + "regexp" +) + +const awsAccessKeyIDPattern = `AKIA[A-Z0-9]{16}` +const awsSecretAccessKeyPattern = `KEY["']?\s*(?::|=>|=)\s*["']?[A-Z0-9/\+=]{40}["']?` +const cryptMD5Pattern = `\$1\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{22}` +const cryptSHA256Pattern = `\$5\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{43}` +const cryptSHA512Pattern = `\$6\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{86}` +const privateKeyHeaderPattern = `-----BEGIN(.*)PRIVATE KEY-----` + +type JSONRedacter struct { + keyMatchers []*regexp.Regexp + valueMatchers []*regexp.Regexp +} + +func NewJSONRedacter(keyPatterns []string, valuePatterns []string) (*JSONRedacter, error) { + if keyPatterns == nil { + keyPatterns = []string{"[Pp]wd", "[Pp]ass"} + } + if valuePatterns == nil { + valuePatterns = DefaultValuePatterns() + } + ret := &JSONRedacter{} + for _, v := range keyPatterns { + r, err := regexp.Compile(v) + if err != nil { + return nil, err + } + ret.keyMatchers = append(ret.keyMatchers, r) + } + for _, v := range valuePatterns { + r, err := regexp.Compile(v) + if err != nil { + return nil, err + } + ret.valueMatchers = append(ret.valueMatchers, r) + } + return ret, nil +} + +func (r JSONRedacter) Redact(data []byte) []byte { + var jsonBlob interface{} + err := json.Unmarshal(data, &jsonBlob) + if err != nil { + return handleError(err) + } + r.redactValue(&jsonBlob) + + data, err = json.Marshal(jsonBlob) + if err != nil { + return handleError(err) + } + + return data +} + +func (r JSONRedacter) redactValue(data *interface{}) interface{} { + if data == nil { + return data + } + + if a, ok := (*data).([]interface{}); ok { + r.redactArray(&a) + } else if m, ok := (*data).(map[string]interface{}); ok { + r.redactObject(&m) + } else if s, ok := (*data).(string); ok { + for _, m := range r.valueMatchers { + if m.MatchString(s) { + (*data) = "*REDACTED*" + break + } + } + } + return (*data) +} + +func (r JSONRedacter) redactArray(data *[]interface{}) { + for i := range *data { + r.redactValue(&((*data)[i])) + } +} + 
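Before the object-level redaction below, a brief usage sketch may be helpful; it is an editorial illustration (not part of the vendored file) and relies on the default patterns defined above, so keys matching `[Pp]wd`/`[Pp]ass` and values matching the AWS, crypt, or private-key patterns are replaced with `*REDACTED*`:

```go
package main

import (
	"fmt"

	"code.cloudfoundry.org/lager/v3"
)

func main() {
	// Passing nil for both pattern lists selects the defaults shown above.
	redacter, err := lager.NewJSONRedacter(nil, nil)
	if err != nil {
		panic(err)
	}

	in := []byte(`{"user":"alice","password":"s3cret","aws_key":"AKIAABCDEFGHIJKLMNOP"}`)
	fmt.Println(string(redacter.Redact(in)))
	// "password" is caught by the key pattern and the AKIA... value by the value
	// pattern; both are emitted as "*REDACTED*", while "user" passes through.
}
```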
+func (r JSONRedacter) redactObject(data *map[string]interface{}) { + for k, v := range *data { + for _, m := range r.keyMatchers { + if m.MatchString(k) { + (*data)[k] = "*REDACTED*" + break + } + } + if (*data)[k] != "*REDACTED*" { + (*data)[k] = r.redactValue(&v) + } + } +} + +func handleError(err error) []byte { + var content []byte + if _, ok := err.(*json.UnsupportedTypeError); ok { + data := map[string]interface{}{"lager serialisation error": err.Error()} + content, err = json.Marshal(data) + } + if err != nil { + panic(err) + } + return content +} + +func DefaultValuePatterns() []string { + return []string{awsAccessKeyIDPattern, awsSecretAccessKeyPattern, cryptMD5Pattern, cryptSHA256Pattern, cryptSHA512Pattern, privateKeyHeaderPattern} +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/logger.go b/vendor/code.cloudfoundry.org/lager/v3/logger.go new file mode 100644 index 00000000..64a29d7e --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/logger.go @@ -0,0 +1,217 @@ +package lager + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "sync/atomic" + "time" + + "github.com/openzipkin/zipkin-go/idgenerator" + "github.com/openzipkin/zipkin-go/model" +) + +const ( + StackTraceBufferSize = 1024 * 100 + RequestIdHeader = "X-Vcap-Request-Id" +) + +type Logger interface { + RegisterSink(Sink) + Session(task string, data ...Data) Logger + SessionName() string + Debug(action string, data ...Data) + Info(action string, data ...Data) + Error(action string, err error, data ...Data) + Fatal(action string, err error, data ...Data) + WithData(Data) Logger + WithTraceInfo(*http.Request) Logger +} + +type logger struct { + component string + task string + sinks []Sink + sessionID string + nextSession uint32 + data Data + idGenerator idgenerator.IDGenerator +} + +func NewLogger(component string) Logger { + return &logger{ + component: component, + task: component, + sinks: []Sink{}, + data: Data{}, + idGenerator: idgenerator.NewRandom128(), + } +} + +func (l *logger) RegisterSink(sink Sink) { + l.sinks = append(l.sinks, sink) +} + +func (l *logger) SessionName() string { + return l.task +} + +func (l *logger) Session(task string, data ...Data) Logger { + sid := atomic.AddUint32(&l.nextSession, 1) + + var sessionIDstr string + + if l.sessionID != "" { + sessionIDstr = fmt.Sprintf("%s.%d", l.sessionID, sid) + } else { + sessionIDstr = fmt.Sprintf("%d", sid) + } + + return &logger{ + component: l.component, + task: fmt.Sprintf("%s.%s", l.task, task), + sinks: l.sinks, + sessionID: sessionIDstr, + data: l.baseData(data...), + idGenerator: l.idGenerator, + } +} + +func (l *logger) WithData(data Data) Logger { + return &logger{ + component: l.component, + task: l.task, + sinks: l.sinks, + sessionID: l.sessionID, + data: l.baseData(data), + idGenerator: l.idGenerator, + } +} + +func (l *logger) WithTraceInfo(req *http.Request) Logger { + traceIDHeader := req.Header.Get(RequestIdHeader) + if traceIDHeader == "" { + return l.WithData(nil) + } + traceHex := strings.Replace(traceIDHeader, "-", "", -1) + traceID, err := model.TraceIDFromHex(traceHex) + if err != nil { + return l.WithData(nil) + } + + spanID := l.idGenerator.SpanID(model.TraceID{}) + return l.WithData(Data{"trace-id": traceID.String(), "span-id": spanID.String()}) +} + +func (l *logger) Debug(action string, data ...Data) { + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: DEBUG, + Data: l.baseData(data...), + } 
+ + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Info(action string, data ...Data) { + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: INFO, + Data: l.baseData(data...), + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Error(action string, err error, data ...Data) { + logData := l.baseData(data...) + + if err != nil { + logData["error"] = err.Error() + } + + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: ERROR, + Data: logData, + Error: err, + } + + for _, sink := range l.sinks { + sink.Log(log) + } +} + +func (l *logger) Fatal(action string, err error, data ...Data) { + logData := l.baseData(data...) + + stackTrace := make([]byte, StackTraceBufferSize) + stackSize := runtime.Stack(stackTrace, false) + stackTrace = stackTrace[:stackSize] + + if err != nil { + logData["error"] = err.Error() + } + + logData["trace"] = string(stackTrace) + + t := time.Now().UTC() + log := LogFormat{ + time: t, + Timestamp: formatTimestamp(t), + Source: l.component, + Message: fmt.Sprintf("%s.%s", l.task, action), + LogLevel: FATAL, + Data: logData, + Error: err, + } + + for _, sink := range l.sinks { + sink.Log(log) + } + + panic(err) +} + +func (l *logger) baseData(givenData ...Data) Data { + data := Data{} + + for k, v := range l.data { + data[k] = v + } + + if len(givenData) > 0 { + for _, dataArg := range givenData { + for key, val := range dataArg { + data[key] = val + } + } + } + + if l.sessionID != "" { + data["session"] = l.sessionID + } + + return data +} + +func formatTimestamp(t time.Time) string { + return fmt.Sprintf("%.9f", float64(t.UnixNano())/1e9) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/models.go b/vendor/code.cloudfoundry.org/lager/v3/models.go new file mode 100644 index 00000000..63077e72 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/models.go @@ -0,0 +1,151 @@ +package lager + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" +) + +type LogLevel int + +const ( + DEBUG LogLevel = iota + INFO + ERROR + FATAL +) + +var logLevelStr = [...]string{ + DEBUG: "debug", + INFO: "info", + ERROR: "error", + FATAL: "fatal", +} + +func (l LogLevel) String() string { + if DEBUG <= l && l <= FATAL { + return logLevelStr[l] + } + return "invalid" +} + +func LogLevelFromString(s string) (LogLevel, error) { + for k, v := range logLevelStr { + if v == s { + return LogLevel(k), nil + } + } + return -1, fmt.Errorf("invalid log level: %s", s) +} + +type Data map[string]interface{} + +type rfc3339Time time.Time + +const rfc3339Nano = "2006-01-02T15:04:05.000000000Z07:00" + +func (t rfc3339Time) MarshalJSON() ([]byte, error) { + // Use AppendFormat to avoid slower string operations, instead we only + // operate on a byte slice + // Avoid creating a new copy of t with a cast, instead use type conversion + stamp := append((time.Time)(t).UTC().AppendFormat([]byte{'"'}, rfc3339Nano), '"') + return stamp, nil +} + +func (t *rfc3339Time) UnmarshalJSON(data []byte) error { + return (*time.Time)(t).UnmarshalJSON(data) +} + +type LogFormat struct { + Timestamp string `json:"timestamp"` + Source string `json:"source"` + Message string `json:"message"` + LogLevel LogLevel `json:"log_level"` + Data Data `json:"data"` + Error error `json:"-"` + time time.Time +} + +func (log 
LogFormat) ToJSON() []byte { + content, err := json.Marshal(log) + if err != nil { + log.Data = dataForJSONMarhallingError(err, log.Data) + content, err = json.Marshal(log) + if err != nil { + panic(err) + } + } + return content +} + +type prettyLogFormat struct { + Timestamp rfc3339Time `json:"timestamp"` + Level string `json:"level"` + Source string `json:"source"` + Message string `json:"message"` + Data Data `json:"data"` + Error error `json:"-"` +} + +func (log LogFormat) toPrettyJSON() []byte { + t := log.time + if t.IsZero() { + t = parseTimestamp(log.Timestamp) + } + + prettyLog := prettyLogFormat{ + Timestamp: rfc3339Time(t), + Level: log.LogLevel.String(), + Source: log.Source, + Message: log.Message, + Data: log.Data, + Error: log.Error, + } + + content, err := json.Marshal(prettyLog) + + if err != nil { + prettyLog.Data = dataForJSONMarhallingError(err, prettyLog.Data) + content, err = json.Marshal(prettyLog) + if err != nil { + panic(err) + } + } + + return content +} + +func dataForJSONMarhallingError(err error, data Data) Data { + _, ok1 := err.(*json.UnsupportedTypeError) + _, ok2 := err.(*json.MarshalerError) + errKey := "unknown_error" + if ok1 || ok2 { + errKey = "lager serialisation error" + } + + return map[string]interface{}{ + errKey: err.Error(), + "data_dump": fmt.Sprintf("%#v", data), + } +} + +func parseTimestamp(s string) time.Time { + if s == "" { + return time.Now() + } + n := strings.IndexByte(s, '.') + if n <= 0 || n == len(s)-1 { + return time.Now() + } + sec, err := strconv.ParseInt(s[:n], 10, 64) + if err != nil || sec < 0 { + return time.Now() + } + nsec, err := strconv.ParseInt(s[n+1:], 10, 64) + if err != nil || nsec < 0 { + return time.Now() + } + return time.Unix(sec, nsec) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go b/vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go new file mode 100644 index 00000000..aeb714d9 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/reconfigurable_sink.go @@ -0,0 +1,37 @@ +package lager + +import ( + "sync/atomic" +) + +type ReconfigurableSink struct { + sink Sink + + minLogLevel int32 +} + +func NewReconfigurableSink(sink Sink, initialMinLogLevel LogLevel) *ReconfigurableSink { + return &ReconfigurableSink{ + sink: sink, + + minLogLevel: int32(initialMinLogLevel), + } +} + +func (sink *ReconfigurableSink) Log(log LogFormat) { + minLogLevel := LogLevel(atomic.LoadInt32(&sink.minLogLevel)) + + if log.LogLevel < minLogLevel { + return + } + + sink.sink.Log(log) +} + +func (sink *ReconfigurableSink) SetMinLevel(level LogLevel) { + atomic.StoreInt32(&sink.minLogLevel, int32(level)) +} + +func (sink *ReconfigurableSink) GetMinLevel() LogLevel { + return LogLevel(atomic.LoadInt32(&sink.minLogLevel)) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go b/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go new file mode 100644 index 00000000..17a30295 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/redacting_sink.go @@ -0,0 +1,62 @@ +package lager + +import ( + "encoding/json" +) + +type redactingSink struct { + sink Sink + jsonRedacter *JSONRedacter +} + +// NewRedactingSink creates a sink that redacts sensitive information from the +// data field. 
The old behavior of NewRedactingWriterSink (which was removed +// in v2) can be obtained using the following code: +// +// redactingSink, err := NewRedactingSink( +// NewWriterSink(writer, minLogLevel), +// keyPatterns, +// valuePatterns, +// ) +// +// if err != nil { +// return nil, err +// } +// +// return NewReconfigurableSink( +// redactingSink, +// minLogLevel, +// ), nil +// +func NewRedactingSink(sink Sink, keyPatterns []string, valuePatterns []string) (Sink, error) { + jsonRedacter, err := NewJSONRedacter(keyPatterns, valuePatterns) + if err != nil { + return nil, err + } + + return &redactingSink{ + sink: sink, + jsonRedacter: jsonRedacter, + }, nil +} + +func (sink *redactingSink) Log(log LogFormat) { + rawJSON, err := json.Marshal(log.Data) + if err != nil { + log.Data = dataForJSONMarhallingError(err, log.Data) + + rawJSON, err = json.Marshal(log.Data) + if err != nil { + panic(err) + } + } + + redactedJSON := sink.jsonRedacter.Redact(rawJSON) + + err = json.Unmarshal(redactedJSON, &log.Data) + if err != nil { + panic(err) + } + + sink.sink.Log(log) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/slog_sink.go b/vendor/code.cloudfoundry.org/lager/v3/slog_sink.go new file mode 100644 index 00000000..095e16a6 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/slog_sink.go @@ -0,0 +1,63 @@ +//go:build go1.21 + +package lager + +import ( + "context" + "log/slog" +) + +// Type slogSink wraps an slog.Logger as a Sink +type slogSink struct { + logger *slog.Logger +} + +// NewSlogSink wraps a slog.Logger as a lager Sink +// This allows code using slog to integrate with code that uses lager +// Note the following log level conversions: +// +// lager.DEBUG -> slog.LevelDebug +// lager.ERROR -> slog.LevelError +// lager.FATAL -> slog.LevelError +// default -> slog.LevelInfo +func NewSlogSink(l *slog.Logger) Sink { + return &slogSink{logger: l} +} + +// Log exists to implement the lager.Sink interface. +func (l *slogSink) Log(f LogFormat) { + // For lager.Error() and lager.Fatal() the error (and stacktrace) are already in f.Data + r := slog.NewRecord(f.time, toSlogLevel(f.LogLevel), f.Message, 0) + r.AddAttrs(toAttr(f.Data)...) 
+ + // By calling the handler directly we can pass through the original timestamp, + // whereas calling a method on the logger would generate a new timestamp + l.logger.Handler().Handle(context.Background(), r) +} + +// toAttr converts a lager.Data into []slog.Attr +func toAttr(d Data) []slog.Attr { + l := len(d) + if l == 0 { + return nil + } + + attr := make([]slog.Attr, 0, l) + for k, v := range d { + attr = append(attr, slog.Any(k, v)) + } + + return attr +} + +// toSlogLevel converts lager log levels to slog levels +func toSlogLevel(l LogLevel) slog.Level { + switch l { + case DEBUG: + return slog.LevelDebug + case ERROR, FATAL: + return slog.LevelError + default: + return slog.LevelInfo + } +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/tools.go b/vendor/code.cloudfoundry.org/lager/v3/tools.go new file mode 100644 index 00000000..56304cc4 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/tools.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package lager + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" +) diff --git a/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go b/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go new file mode 100644 index 00000000..ba261fe7 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/truncating_sink.go @@ -0,0 +1,32 @@ +package lager + +import "code.cloudfoundry.org/lager/v3/internal/truncate" + +type truncatingSink struct { + sink Sink + maxDataStringLength int +} + +// NewTruncatingSink returns a sink that truncates strings longer than the max +// data string length +// Example: +// writerSink := lager.NewWriterSink(os.Stdout, lager.INFO) +// sink := lager.NewTruncatingSink(testSink, 20) +// logger := lager.NewLogger("test") +// logger.RegisterSink(sink) +// logger.Info("message", lager.Data{"A": strings.Repeat("a", 25)}) +func NewTruncatingSink(sink Sink, maxDataStringLength int) Sink { + return &truncatingSink{ + sink: sink, + maxDataStringLength: maxDataStringLength, + } +} + +func (sink *truncatingSink) Log(log LogFormat) { + truncatedData := Data{} + for k, v := range log.Data { + truncatedData[k] = truncate.Value(v, sink.maxDataStringLength) + } + log.Data = truncatedData + sink.sink.Log(log) +} diff --git a/vendor/code.cloudfoundry.org/lager/v3/writer_sink.go b/vendor/code.cloudfoundry.org/lager/v3/writer_sink.go new file mode 100644 index 00000000..e78177a5 --- /dev/null +++ b/vendor/code.cloudfoundry.org/lager/v3/writer_sink.go @@ -0,0 +1,66 @@ +package lager + +import ( + "io" + "sync" +) + +// A Sink represents a write destination for a Logger. It provides +// a thread-safe interface for writing logs +type Sink interface { + //Log to the sink. Best effort -- no need to worry about errors. 
+ Log(LogFormat) +} + +type writerSink struct { + writer io.Writer + minLogLevel LogLevel + writeL *sync.Mutex +} + +func NewWriterSink(writer io.Writer, minLogLevel LogLevel) Sink { + return &writerSink{ + writer: writer, + minLogLevel: minLogLevel, + writeL: new(sync.Mutex), + } +} + +func (sink *writerSink) Log(log LogFormat) { + if log.LogLevel < sink.minLogLevel { + return + } + + // Convert to json outside of critical section to minimize time spent holding lock + message := append(log.ToJSON(), '\n') + + sink.writeL.Lock() + sink.writer.Write(message) //nolint:errcheck + sink.writeL.Unlock() +} + +type prettySink struct { + writer io.Writer + minLogLevel LogLevel + writeL sync.Mutex +} + +func NewPrettySink(writer io.Writer, minLogLevel LogLevel) Sink { + return &prettySink{ + writer: writer, + minLogLevel: minLogLevel, + } +} + +func (sink *prettySink) Log(log LogFormat) { + if log.LogLevel < sink.minLogLevel { + return + } + + // Convert to json outside of critical section to minimize time spent holding lock + message := append(log.toPrettyJSON(), '\n') + + sink.writeL.Lock() + sink.writer.Write(message) //nolint:errcheck + sink.writeL.Unlock() +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index a8c29bfb..8969526a 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -91,11 +91,12 @@ logr design but also left out some parts and changed others: | Adding a name to a logger | `WithName` | no API | | Modify verbosity of log entries in a call chain | `V` | no API | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | The high-level slog API is explicitly meant to be one of many different APIs that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) -package. +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. ### Inspiration @@ -145,24 +146,24 @@ There are implementations for the following logging libraries: ## slog interoperability Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and -`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. `slogr` itself leaves that to the caller. +slog API. -## Using a `logr.Sink` as backend for slog +### Using a `logr.LogSink` as backend for slog Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +implementing both the normal logr interface(s) and `SlogSink`. Because of a conflict in the parameters of the common `Enabled` method, it is [not possible to implement both slog.Handler and logr.Sink in the same type](https://github.com/golang/go/issues/59110). If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +without the need to convert parameters. 
`FromSlogHandler` and `ToSlogHandler` can convert back and forth without adding additional wrappers, with one exception: when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future log calls. Such an implementation should also support values that implement specific @@ -187,13 +188,13 @@ Not supporting slog has several drawbacks: These drawbacks are severe enough that applications using a mixture of slog and logr should switch to a different backend. -## Using a `slog.Handler` as backend for logr +### Using a `slog.Handler` as backend for logr Using a plain `slog.Handler` without support for logr works better than the other direction: - All logr verbosity levels can be mapped 1:1 to their corresponding slog level by negating them. -- Stack unwinding is done by the `slogr.SlogSink` and the resulting program +- Stack unwinding is done by the `SlogSink` and the resulting program counter is passed to the `slog.Handler`. - Names added via `Logger.WithName` are gathered and recorded in an additional attribute with `logger` as key and the names separated by slash as value. @@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility with logr implementations without slog support is not important, then `slog.Valuer` is sufficient. -## Context support for slog +### Context support for slog Storing a logger in a `context.Context` is not supported by -slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this -to fill this gap: - - func HandlerFromContext(ctx context.Context) slog.Handler { - logger, err := logr.FromContext(ctx) - if err == nil { - return slogr.NewSlogHandler(logger) - } - return slog.Default().Handler() - } - - func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { - return logr.NewContext(ctx, slogr.NewLogr(handler)) - } - -The downside is that storing and retrieving a `slog.Handler` needs more -allocations compared to using a `logr.Logger`. Therefore the recommendation is -to use the `logr.Logger` API in code which uses contextual logging. +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. + +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. 
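To make the storage-and-retrieval functions above concrete, here is a small editorial sketch (not part of the upstream README; the JSON handler is just an arbitrary slog backend):

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Store a *slog.Logger in the context...
	s := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	ctx := logr.NewContextWithSlogLogger(context.Background(), s)

	// ...and read it back through either API: FromContext converts it to a
	// logr.Logger, FromContextAsSlogLogger returns the *slog.Logger as-is.
	if l, err := logr.FromContext(ctx); err == nil {
		l.Info("hello from logr", "answer", 42)
	}
	if s2 := logr.FromContextAsSlogLogger(ctx); s2 != nil {
		s2.Info("hello from slog", "answer", 42)
	}
}
```

Returning to context-scoped values: extracting them in the backend only works when the context reaches each log call.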
+ +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 00000000..de8bcc3a --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 00000000..f012f9a1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. 
+func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 00000000..065ef0b8 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 2a5075a1..b4428e10 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -207,10 +207,6 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. @@ -410,45 +406,6 @@ func (l Logger) IsZero() bool { return l.sink == nil } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. 
-func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. type RuntimeInfo struct { diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go similarity index 63% rename from vendor/github.com/go-logr/logr/slogr/sloghandler.go rename to vendor/github.com/go-logr/logr/sloghandler.go index ec6725ce..82d1ba49 100644 --- a/vendor/github.com/go-logr/logr/slogr/sloghandler.go +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -17,18 +17,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" - - "github.com/go-logr/logr" ) type slogHandler struct { // May be nil, in which case all logs get discarded. - sink logr.LogSink + sink LogSink // Non-nil if sink is non-nil and implements SlogSink. slogSink SlogSink @@ -54,7 +52,7 @@ func (l *slogHandler) GetLevel() slog.Level { return l.levelBias } -func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) } @@ -72,9 +70,7 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { kvList := make([]any, 0, 2*record.NumAttrs()) record.Attrs(func(attr slog.Attr) bool { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) return true }) if record.Level >= slog.LevelError { @@ -90,15 +86,15 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { // are called by Handle, code in slog gets skipped. // // This offset currently (Go 1.21.0) works for calls through -// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// slog.New(ToSlogHandler(...)). There's no guarantee that the call // chain won't change. Wrapping the handler will also break unwinding. It's // still better than not adjusting at all.... // -// This cannot be done when constructing the handler because NewLogr needs +// This cannot be done when constructing the handler because FromSlogHandler needs // access to the original sink without this adjustment. A second copy would // work, but then WithAttrs would have to be called for both of them. 
-func (l *slogHandler) sinkWithCallDepth() logr.LogSink { - if sink, ok := l.sink.(logr.CallDepthLogSink); ok { +func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { return sink.WithCallDepth(2) } return l.sink @@ -109,60 +105,88 @@ func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return l } - copy := *l + clone := *l if l.slogSink != nil { - copy.slogSink = l.slogSink.WithAttrs(attrs) - copy.sink = copy.slogSink + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink } else { kvList := make([]any, 0, 2*len(attrs)) for _, attr := range attrs { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) } - copy.sink = l.sink.WithValues(kvList...) + clone.sink = l.sink.WithValues(kvList...) } - return © + return &clone } func (l *slogHandler) WithGroup(name string) slog.Handler { if l.sink == nil { return l } - copy := *l + if name == "" { + // slog says to inline empty groups + return l + } + clone := *l if l.slogSink != nil { - copy.slogSink = l.slogSink.WithGroup(name) - copy.sink = l.slogSink + clone.slogSink = l.slogSink.WithGroup(name) + clone.sink = clone.slogSink } else { - copy.groupPrefix = copy.addGroupPrefix(name) + clone.groupPrefix = addPrefix(clone.groupPrefix, name) + } + return &clone +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + prefix := groupPrefix + if attr.Key != "" { + prefix = addPrefix(groupPrefix, attr.Key) + } + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, prefix, grpKVs) + } + kvList = append(kvList, grpKVs...) + } else if attr.Key != "" { + kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) } - return © + + return kvList } -func (l *slogHandler) addGroupPrefix(name string) string { - if l.groupPrefix == "" { +func addPrefix(prefix, name string) string { + if prefix == "" { return name } - return l.groupPrefix + groupSeparator + name + if name == "" { + return prefix + } + return prefix + groupSeparator + name } // levelFromSlog adjusts the level by the logger's verbosity and negates it. // It ensures that the result is >= 0. This is necessary because the result is -// passed to a logr.LogSink and that API did not historically document whether +// passed to a LogSink and that API did not historically document whether // levels could be negative or what that meant. 
// // Some example usage: -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) func (l *slogHandler) levelFromSlog(level slog.Level) int { result := -level - result += l.levelBias // in case the original logr.Logger had a V level + result += l.levelBias // in case the original Logger had a V level if result < 0 { - result = 0 // because logr.LogSink doesn't expect negative V levels + result = 0 // because LogSink doesn't expect negative V levels } return int(result) } diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go new file mode 100644 index 00000000..28a83d02 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr.go @@ -0,0 +1,100 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) 
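Since the mapping above is easiest to see in running code, here is a short editorial sketch (not part of the vendored file); it uses the `funcr` sink that ships in the same module purely as a convenient backend, and the message and key/value pairs are invented:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// A simple logr backend that prints formatted key/value pairs.
	logrLogger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 4})

	// Wrap it for slog callers: Info and Warn arrive at the sink as V(0),
	// Debug as V(4), and Error goes to the sink's Error method.
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("request-handled", "status", 200)
	slogLogger.Debug("request-details", "status", 200)
}
```

ToSlogHandler itself follows.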
+func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. +type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go index eb519ae2..36432c56 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogr.go +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -23,10 +23,11 @@ limitations under the License. // // See the README in the top-level [./logr] package for a discussion of // interoperability. +// +// Deprecated: use the main logr package instead. package slogr import ( - "context" "log/slog" "github.com/go-logr/logr" @@ -34,75 +35,27 @@ import ( // NewLogr returns a logr.Logger which writes to the slog.Handler. // -// The logr verbosity level is mapped to slog levels such that V(0) becomes -// slog.LevelInfo and V(4) becomes slog.LevelDebug. +// Deprecated: use [logr.FromSlogHandler] instead. func NewLogr(handler slog.Handler) logr.Logger { - if handler, ok := handler.(*slogHandler); ok { - if handler.sink == nil { - return logr.Discard() - } - return logr.New(handler.sink).V(int(handler.levelBias)) - } - return logr.New(&slogSink{handler: handler}) + return logr.FromSlogHandler(handler) } // NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. // -// The returned logger writes all records with level >= slog.LevelError as -// error log entries with LogSink.Error, regardless of the verbosity level of -// the logr.Logger: -// -// logger := -// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) -// -// The level of all other records gets reduced by the verbosity -// level of the logr.Logger and the result is negated. If it happens -// to be negative, then it gets replaced by zero because a LogSink -// is not expected to handled negative levels: -// -// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) -// slog.New(NewSlogHandler(logger)).Warning(...) 
-> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +// Deprecated: use [logr.ToSlogHandler] instead. func NewSlogHandler(logger logr.Logger) slog.Handler { - if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { - return sink.handler - } + return logr.ToSlogHandler(logger) +} - handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} - if slogSink, ok := handler.sink.(SlogSink); ok { - handler.slogSink = slogSink - } - return handler +// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// Deprecated: use [logr.ToSlogHandler] instead. +func ToSlogHandler(logger logr.Logger) slog.Handler { + return logr.ToSlogHandler(logger) } // SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. It then should -// also support special slog values like slog.Group. When used as a -// slog.Handler, the advantages are: +// logging through the slog.Logger or slog.Handler APIs better. // -// - stack unwinding gets avoided in favor of logging the pre-recorded PC, -// as intended by slog -// - proper grouping of key/value pairs via WithGroup -// - verbosity levels > slog.LevelInfo can be recorded -// - less overhead -// -// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally -// well. Developers can pick whatever API suits them better and/or mix -// packages which use either API in the same binary with a common logging -// implementation. -// -// This interface is necessary because the type implementing the LogSink -// interface cannot also implement the slog.Handler interface due to the -// different prototype of the common Enabled method. -// -// An implementation could support both interfaces in two different types, but then -// additional interfaces would be needed to convert between those types in NewLogr -// and NewSlogHandler. -type SlogSink interface { - logr.LogSink - - Handle(ctx context.Context, record slog.Record) error - WithAttrs(attrs []slog.Attr) SlogSink - WithGroup(name string) SlogSink -} +// Deprecated: use [logr.SlogSink] instead. +type SlogSink = logr.SlogSink diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go similarity index 82% rename from vendor/github.com/go-logr/logr/slogr/slogsink.go rename to vendor/github.com/go-logr/logr/slogsink.go index 6fbac561..4060fcbc 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogsink.go +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -17,24 +17,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" "runtime" "time" - - "github.com/go-logr/logr" ) var ( - _ logr.LogSink = &slogSink{} - _ logr.CallDepthLogSink = &slogSink{} - _ Underlier = &slogSink{} + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} ) -// Underlier is implemented by the LogSink returned by NewLogr. +// Underlier is implemented by the LogSink returned by NewFromLogHandler. type Underlier interface { // GetUnderlying returns the Handler used by the LogSink. 
GetUnderlying() slog.Handler @@ -54,7 +52,7 @@ type slogSink struct { handler slog.Handler } -func (l *slogSink) Init(info logr.RuntimeInfo) { +func (l *slogSink) Init(info RuntimeInfo) { l.callDepth = info.CallDepth } @@ -62,7 +60,7 @@ func (l *slogSink) GetUnderlying() slog.Handler { return l.handler } -func (l *slogSink) WithCallDepth(depth int) logr.LogSink { +func (l *slogSink) WithCallDepth(depth int) LogSink { newLogger := *l newLogger.callDepth += depth return &newLogger @@ -93,18 +91,18 @@ func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interf record.AddAttrs(slog.Any(errKey, err)) } record.Add(kvList...) - l.handler.Handle(context.Background(), record) + _ = l.handler.Handle(context.Background(), record) } -func (l slogSink) WithName(name string) logr.LogSink { +func (l slogSink) WithName(name string) LogSink { if l.name != "" { - l.name = l.name + "/" + l.name += "/" } l.name += name return &l } -func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { +func (l slogSink) WithValues(kvList ...interface{}) LogSink { l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) return &l } diff --git a/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig new file mode 100644 index 00000000..b0c95367 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes new file mode 100644 index 00000000..176a458f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore new file mode 100644 index 00000000..5e3002f8 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md new file mode 100644 index 00000000..2ce45dd4 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt new file mode 100644 index 00000000..f311b1ea --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-task/slim-sprig/v3/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md new file mode 100644 index 00000000..b5ab5642 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. +The reason for this is to make this library more lightweight. Most of these +functions (specially crypto ones) are not needed on most apps, but costs a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. 
+ +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml new file mode 100644 index 00000000..8e6346bb --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '3' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . diff --git a/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go new file mode 100644 index 00000000..d06e516d --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go new file mode 100644 index 00000000..ed022dda --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. 
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go new file mode 100644 index 00000000..b9f97966 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. 
+// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. 
+func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go new file mode 100644 index 00000000..77ebc61b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go new file mode 100644 index 00000000..aabb9d44 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. 
+ +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/v3/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go new file mode 100644 index 00000000..5ea74f89 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" 
}, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. + "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go new file mode 100644 index 00000000..ca0fbb78 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go new file mode 100644 index 00000000..108d78a9 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go new file mode 100644 index 00000000..98cbb37a --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := 
reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? + return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] 
+ if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go new file mode 100644 index 00000000..8a65c132 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go new file mode 100644 index 00000000..fab55101 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git 
a/vendor/github.com/go-task/slim-sprig/v3/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go new file mode 100644 index 00000000..3c62d6b6 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := 
range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/go-task/slim-sprig/v3/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go new file mode 100644 index 00000000..b8e120e1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 00000000..0b4659b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 00000000..081c86fa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. 
With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. 
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. 
+ +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 00000000..1e91766a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: 
"gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: 
"varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: 
"gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + 
Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + 
proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 
0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 
0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden 
b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 00000000..f6502e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000..b80c8565 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 00000000..390d4e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 00000000..3496dc99 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. 
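As a rough, illustrative sketch only (not part of the vendored file or of this patch): the package comment above explains that this descriptor package exists so generated types can expose their own descriptors. Assuming the ForMessage helper defined just below, any gogo-generated message with a Descriptor() method can be inspected like this; the generated FileDescriptorProto type itself is used here purely to keep the sketch self-contained.

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// FileDescriptorProto is itself a generated message, so it can serve as
	// the input; any gogo-generated message with Descriptor() would work.
	fd, md := descriptor.ForMessage(&descriptor.FileDescriptorProto{})

	// Prints the source .proto file name and the message name,
	// e.g. "descriptor.proto FileDescriptorProto".
	fmt.Println(fd.GetName(), md.GetName())
}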
+package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 00000000..18b2a331 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
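A minimal sketch of how the oneof encoding described in the comments above can be consumed (an editor's illustration under assumed usage, not code from this patch): OneofIndex on each field points into the containing message's OneofDecl list, so fields can be grouped by the oneof they belong to. The package name and helper function below are hypothetical.

package example

import (
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// oneofFields groups a message's field names by the oneof each belongs to.
// Fields with a nil OneofIndex are regular fields and are skipped.
func oneofFields(md *descriptor.DescriptorProto) map[string][]string {
	groups := map[string][]string{}
	for _, f := range md.GetField() {
		if f.OneofIndex == nil {
			continue // not part of any oneof
		}
		oneof := md.GetOneofDecl()[f.GetOneofIndex()].GetName()
		groups[oneof] = append(groups[oneof], f.GetName())
	}
	return groups
}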
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 00000000..165b2110 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 00000000..e0846a35 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS 
new file mode 100644 index 00000000..fd736cb1 --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 00000000..8c8c37d2 --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 00000000..860bb304 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,591 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 
2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = b.tmpLines[:0] // Use shared space temporarily + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. +func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. 
+ const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + } + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr with enough 
empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool 
has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 00000000..c794b939 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,274 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles. + +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true is the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). + continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B]. +// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. 
+ s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. +func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. 
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 00000000..bef1d604 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". 
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 00000000..4580bab1 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. 
+func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. + return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. +func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. 
+ var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as both the ID of each +// location. +func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". 
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 00000000..8d07fd6c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. + +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... 
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. + addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. 
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. 
+// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. + similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. 
+ addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. +func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. 
+ return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. + if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. 
+ if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. +func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. +func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. 
+func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. +func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. + return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. 
+ `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. + `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 00000000..eee0132e --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,669 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "encoding/binary" + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. 
The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. +func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID locationIDMap + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
+ s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*3) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + Column: src.Column, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. 
+func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. 
+func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 00000000..62df80a5 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,864 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. 
+type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. + encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. + NumLabel map[string][]int64 + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + Column int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. 
The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. +func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. 
+func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes 
preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber || !columnnumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
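To make the two methods above concrete, a minimal sketch, assuming p is a *profile.Profile obtained from Parse; this fragment is illustrative and not part of the vendored file:

    // Collapse inline frames and drop line, column and address detail while
    // keeping function and file names, then inspect the numeric label units.
    if err := p.Aggregate(false, true, true, false, false, false); err != nil {
        log.Fatal(err)
    }
    units, ignored := p.NumLabelUnits()
    for key, unit := range units {
        fmt.Printf("label %q uses unit %q (ignored units: %v)\n", key, unit, ignored[key])
    }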
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" + if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + l.Line[li].Column, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. 
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
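A short sketch of the label and scaling helpers above, assuming p is a parsed *profile.Profile; the key and value names are invented for illustration:

    // Tag every sample, attach a numeric label with an explicit unit, then
    // halve all sample values (samples that round to all zeros are dropped).
    p.SetLabel("origin", []string{"test-run"})
    p.SetNumLabel("request", []int64{4096}, []string{"bytes"})
    p.Scale(0.5)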
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 00000000..a15696ba --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
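The header comment above describes the varint-based wire format that the hand-written encoder and decoder below implement. A self-contained sketch of that scheme, kept separate from the vendored proto.go (the helper names intentionally mirror the package-private ones, for illustration only):

    package main

    import "fmt"

    // Seven payload bits per byte, most-significant bit set on every byte except
    // the last; a field is prefixed by a tag of (fieldNumber<<3 | wireType).
    func encodeVarint(out []byte, x uint64) []byte {
        for x >= 128 {
            out = append(out, byte(x)|0x80)
            x >>= 7
        }
        return append(out, byte(x))
    }

    func decodeVarint(data []byte) (uint64, []byte, error) {
        var u uint64
        for i := 0; ; i++ {
            if i >= 10 || i >= len(data) {
                return 0, nil, fmt.Errorf("bad varint")
            }
            u |= uint64(data[i]&0x7F) << uint(7*i)
            if data[i]&0x80 == 0 {
                return u, data[i+1:], nil
            }
        }
    }

    func main() {
        // Field 1, wire type 0 (varint), value 300.
        b := encodeVarint(nil, 1<<3|0)
        b = encodeVarint(b, 300)
        fmt.Printf("% x\n", b) // 08 ac 02

        _, rest, _ := decodeVarint(b) // skip the tag
        v, _, _ := decodeVarint(rest)
        fmt.Println(v) // 300
    }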
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for 
len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, u) + } + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 00000000..b2f9fd54 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,194 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. +func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
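As a loose sketch of how the pruning entry points above are typically driven, assuming p is a parsed *profile.Profile and the usual "log" and "regexp" imports; the frame-name patterns are invented:

    // RemoveUninteresting anchors DropFrames/KeepFrames as ^(...)$ before compiling.
    p.DropFrames = `runtime\..*`
    p.KeepFrames = `runtime\.mallocgc`
    if err := p.RemoveUninteresting(); err != nil {
        log.Fatal(err)
    }

    // Prune can also be called directly with pre-compiled expressions.
    drop := regexp.MustCompile(`^(github\.com/example/.*)$`) // hypothetical pattern
    p.Prune(drop, nil)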
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE new file mode 100644 index 00000000..9415ee72 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 00000000..a61021d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 00000000..778bfd7c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go new file mode 100644 index 00000000..dd1d143c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -0,0 +1,809 @@ +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func newColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) 
+ procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 
0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + 
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr 
|= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := 
float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go new file mode 100644 index 00000000..743555dd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -0,0 +1,230 @@ +package formatter + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" +) + +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var ColorableStdErr = newColorable(os.Stderr) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...interface{}) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...interface{}) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) 
+} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...interface{}) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) 
+ } + + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + words := strings.Split(line, " ") + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { + wordLength := f.length(word) + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + return match + }) + } + + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 00000000..5db5d1a7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,63 @@ +package build + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build ", + ShortDoc: "Build the passed in (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + 
internal.VerifyCLIAndFrameworkVersion(suites) + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + fmt.Printf("Compiled %s.test\n", suite.PackageName) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 00000000..2efd2860 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 00000000..12e0e565 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff 
--git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 00000000..88dd8d6b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) 
{ + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 00000000..a367a1fc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 00000000..b2dc59be --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig/v3" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + 
"github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom boostrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 00000000..cf3b7cb6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,265 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig/v3" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. 
`package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate ", + ShortDoc: "Generate a test file named _test.go", + Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go. + +You can also pass a of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + BuildTags string + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + BuildTags: getBuildTags(conf.Tags), + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render bootstrap template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + defer modFile.Close() + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 00000000..4dab07d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,43 @@ +package generators + +var specText = `{{.BuildTags}} +package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 00000000..28c7aa6f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,76 @@ +package generators + +import ( + "fmt" + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string + Tags string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if 
isDigitErr == nil { + return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} + +// getBuildTags returns the resultant string to be added. +// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 00000000..86da7340 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,161 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 00000000..3c5079ff --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, 
fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 00000000..8e16d2bb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,227 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := 
filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) 
+ if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +// loads each profile, merges them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) + if err != nil { + return err + } + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) + } + } + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) + if err != nil { + return err + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + _ = proFile.Close() + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 00000000..41052ea1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { 
+ return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. + if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, 
cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = 
AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 00000000..df99875b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,284 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) 
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.MatchString(file.Name()) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.MatchString(file.Name()) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.MatchString(file.Name()) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 00000000..bd9ca7d5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 00000000..9da1bab3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versiorRe.FindStringSubmatch(components[1]) + if matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. 
Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}")) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 00000000..6c61f09d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels <FLAGS> <PACKAGES>", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + 
} + } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 00000000..e9abb27d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 00000000..5d8d00bb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,301 @@ +package outline + +import ( + "go/ast" + "go/token" + "strconv" + + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. 
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} + +func labelFromCallExpr(ce *ast.CallExpr) []string { + + labels := []string{} + if len(ce.Args) < 2 { + return labels + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Label" { + ls := extractLabels(expr) + labels = append(labels, ls...) + } + } + } + return labels +} + +func extractLabels(expr *ast.CallExpr) []string { + out := []string{} + for _, arg := range expr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + + return out +} + +func pendingFromCallExpr(ce *ast.CallExpr) bool { + + pending := false + if len(ce.Args) < 2 { + return pending + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Pending" { + pending = true + } + case *ast.Ident: + if expr.Name == "Pending" { + pending = true + } + } + } + return pending +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 00000000..f0a6b5d2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "<nil>" { + name = "ginkgo" + } + if name == "." { + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. 
+func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 00000000..c2327cda --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,110 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. 
+func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + var labels string + if len(n.Labels) == 1 { + labels = n.Labels[0] + } else { + labels = strings.Join(n.Labels, ", ") + } + // enclosing labels in a double-quoted, comma-separated list so that when imported into a CSV app the Labels column has comma-separated strings + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 00000000..36698d46 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. + stdinAlias = "-" + usageCommand = "ginkgo outline <filename>" +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline <filename>", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index 00000000..aaed4d57 
--- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", + ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! 
Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 00000000..7dd29439 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func 
unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 00000000..6c485c5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 00000000..26418ac6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 00000000..a34d9435 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + 
+func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 00000000..0e6ae1f2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,117 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if isHiddenFile(info) { + continue + } + + if goTestRegExp.MatchString(info.Name()) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.MatchString(info.Name()) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 00000000..b4892beb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 
+1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 00000000..53272df7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) 
+} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 00000000..bde4193c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,192 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch -- ", + ShortDoc: "Watch the passed in and runs their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted() { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 00000000..8ed86111 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,177 @@ +package interrupt_handler + +import ( + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond + +type InterruptCause uint + +const ( + InterruptCauseInvalid InterruptCause = iota + InterruptCauseSignal + InterruptCauseAbortByOtherProcess +) + +type InterruptLevel uint + +const ( + InterruptLevelUninterrupted InterruptLevel = iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Channel chan interface{} + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus +} + +type InterruptHandler struct { + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal + requestAbortCheck chan interface{} +} + +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} + } + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + requestAbortCheck: 
make(chan interface{}), + client: client, + signals: signals, + } + handler.registerForInterrupts() + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler *InterruptHandler) registerForInterrupts() { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, handler.signals...) + + // cross-process abort handling + var abortChannel chan interface{} + if handler.client != nil { + abortChannel = make(chan interface{}) + go func() { + pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) + for { + select { + case <-pollTicker.C: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.stop: + pollTicker.Stop() + return + } + } + }() + } + + go func(abortChannel chan interface{}) { + var interruptCause InterruptCause + for { + select { + case <-signalChannel: + interruptCause = InterruptCauseSignal + case <-abortChannel: + interruptCause = InterruptCauseAbortByOtherProcess + case <-handler.stop: + signal.Stop(signalChannel) + return + } + abortChannel = nil + + handler.lock.Lock() + oldLevel := handler.level + handler.cause = interruptCause + if handler.level == InterruptLevelUninterrupted { + handler.level = InterruptLevelCleanupAndReport + } else if handler.level == InterruptLevelCleanupAndReport { + handler.level = InterruptLevelReportOnly + } else if handler.level == InterruptLevelReportOnly { + handler.level = InterruptLevelBailOut + } + if handler.level != oldLevel { + close(handler.c) + handler.c = make(chan interface{}) + } + handler.lock.Unlock() + } + }(abortChannel) +} + +func (handler *InterruptHandler) Status() InterruptStatus { + handler.lock.Lock() + status := InterruptStatus{ + Level: handler.level, + Channel: handler.c, + Cause: handler.cause, + } + handler.lock.Unlock() + + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() + } + + return status +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 00000000..bf0de496 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupt_handler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + signal.Notify(c, syscall.SIGQUIT) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 00000000..fcf8da83 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package interrupt_handler + +func SwallowSigQuit() { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 00000000..b3cd6429 --- /dev/null 
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,72 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan interface{} + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 00000000..6547c7a6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,169 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + return &httpClient{ + serverHost: serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return false + } + resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func (client *httpClient) Close() error { + return nil +} + +func (client *httpClient) post(path string, data interface{}) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data interface{}) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == 
http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, error) { + var counter ParallelIndexCounter + err := client.poll("/counter", &counter) + return counter.Index, err +} + +func (client *httpClient) PostAbort() error { + return client.post("/abort", nil) +} + +func (client *httpClient) ShouldAbort() bool { + err := client.poll("/abort", nil) + if err == ErrorGone { + return true + } + return false +} + +func (client *httpClient) Write(p []byte) (int, error) { + resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("failed to emit output") + } + return len(p), err +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go new file mode 100644 index 00000000..d2c71ab1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ 
-0,0 +1,242 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "encoding/json" + "io" + "net" + "net/http" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type httpServer struct { + listener net.Listener + handler *ServerHandler +} + +// Create a new server, automatically selecting a port +func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &httpServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +// Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *httpServer) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) + mux.HandleFunc("/did-run", server.didRun) + mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) + mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) + + //synchronization endpoints + mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) + mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) + mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) + mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) + mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) + mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/up", server.handleUp) + mux.HandleFunc("/abort", server.handleAbort) + + go httpServer.Serve(server.listener) +} + +// Stop the server +func (server *httpServer) Close() { + server.listener.Close() +} + +// The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, 
&beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 00000000..59e8e6fd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,136 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data interface{}) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return 
client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) +} + +func (client *rpcClient) Write(p []byte) (int, error) { + var n int + err := client.client.Call("Server.EmitOutput", p, &n) + return n, err +} + +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) +} + +func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) +} + +func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *rpcClient) FetchNextCounter() (int, error) { + var counter int + err := client.client.Call("Server.Counter", voidSender, &counter) + return counter, err +} + +func (client *rpcClient) PostAbort() error { + return client.client.Call("Server.Abort", voidSender, voidReceiver) +} + +func (client *rpcClient) ShouldAbort() bool { + var shouldAbort bool + client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) + return shouldAbort +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go new file mode 100644 index 00000000..2620fd56 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -0,0 +1,75 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "io" + "net" + "net/http" + "net/rpc" + + "github.com/onsi/ginkgo/v2/reporters" +) + +/* +RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. 
+*/ +type RPCServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &RPCServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *RPCServer) Start() { + rpcServer := rpc.NewServer() + rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server + + httpServer := &http.Server{} + httpServer.Handler = rpcServer + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *RPCServer) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. +func (server *RPCServer) Address() string { + return server.listener.Addr().String() +} + +func (server *RPCServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *RPCServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *RPCServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *RPCServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go new file mode 100644 index 00000000..a6d98793 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -0,0 +1,234 @@ +package parallel_support + +import ( + "io" + "os" + "sync" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Void struct{} + +var voidReceiver *Void = &Void{} +var voidSender Void + +// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. 
+// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan interface{}), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { + var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) 
ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + +func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 00000000..98097337 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,780 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. 
+ +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + "sync" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastCharWasNewline bool + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastCharWasNewline: true, + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + 
highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + v := r.conf.Verbosity() + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} + +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + + header := r.specDenoter + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } + } + + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) + + switch report.State { + case types.SpecStatePassed: + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("%s PASSED", header) + } + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true + } + case types.SpecStatePending: + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true + } + case types.SpecStateSkipped: + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) + } + } + + // If we have no content to show, jsut emit the header and return + if !reportHasContent { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } + + //Emit Stdout/Stderr Output + if showSeparateStdSection { + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) + } + + if showSeparateVisibilityAlwaysReportsSection { + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) + } + + if showTimeline { + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) + } + + // Emit Failure Message + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { + r.emitBlock("\n") + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. 
To view them in detail run {{bold}}ginkgo -vv{{/}}")) + } + } + + r.emitDelimiter(0) +} + +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" + } +} + +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) +} + +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) + } + } + } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} + +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) + } +} + +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) + r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } + if failure.ForwardedPanic != "" { + 
r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) + } + + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } + + if !failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) + } + + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter(1) + + if report.RunningInParallel { + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 + } + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range 
report.AdditionalReports { + r.emit(r.fi(indent+1, additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) + } +} + +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return + } + r.emitReportEntry(1, entry) +} + +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. 
Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + r._emit(s, false, false) +} + +func (r *DefaultReporter) emitBlock(s string) { + r._emit(s, true, false) +} + +func (r *DefaultReporter) emitDelimiter(indent uint) { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) +} + +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { + return + } + r.lock.Lock() + defer r.lock.Unlock() + if isDelimiter && r.lastEmissionWasDelimiter { + return + } + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...interface{}) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { + return r.formatter.Fi(indentation, format, args...) +} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
+ + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) + } else { + texts = append(texts, r.f(report.LeafNodeText)) + } + labels = append(labels, report.LeafNodeLabels) + locations = append(locations, report.LeafNodeLocation) + + failureLocation := report.Failure.FailureNodeLocation + if usePreciseFailureLocation { + failureLocation = report.Failure.Location + } + + highlightIndex := -1 + switch report.Failure.FailureNodeContext { + case types.FailureNodeAtTopLevel: + texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) + locations = append([]types.CodeLocation{failureLocation}, locations...) + labels = append([][]string{{}}, labels...) + highlightIndex = 0 + case types.FailureNodeInContainer: + i := report.Failure.FailureNodeContainerIndex + texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType) + locations[i] = failureLocation + highlightIndex = i + case types.FailureNodeIsLeafNode: + i := len(texts) - 1 + texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText) + locations[i] = failureLocation + highlightIndex = i + default: + //there is no failure, so we highlight the leaf node + highlightIndex = len(texts) - 1 + } + + out := "" + if veryVerbose { + for i := range texts { + if i == highlightIndex { + out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i]) + } else { + out += r.fi(uint(i), "%s", texts[i]) + } + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } else { + for i := range texts { + style := "{{/}}" + if i%2 == 1 { + style = "{{gray}}" + } + if i == highlightIndex { + style = highlightColor + "{{bold}}" + } + out += r.f(style+"%s", texts[i]) + if i < len(texts)-1 { + out += " " + } else { + out += r.f("{{/}}") + } + } + flattenedLabels := report.Labels() + if len(flattenedLabels) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) + } + out += "\n" + if usePreciseFailureLocation { + out += r.f("{{gray}}%s{{/}}", failureLocation) + } else { + leafLocation := locations[len(locations)-1] + if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) { + out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation) + out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation) + } else { + out += r.f("{{gray}}%s{{/}}", leafLocation) + } + } + + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go new file mode 100644 index 00000000..613072eb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -0,0 +1,149 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters +// this has been removed in V2. +// Please read the documentation at: +// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +// for Ginkgo's new behavior and for a migration path. 
+type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. +func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: false, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) 
+ componentTexts = append(componentTexts, spec.LeafNodeText) + componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation) + + specSummary := &types.DeprecatedSpecSummary{ + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + NumberOfSamples: spec.NumAttempts, + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.SpecWillRun(specSummary) + reporter.SpecDidComplete(specSummary) + + switch spec.State { + case types.SpecStatePending: + summary.NumberOfPendingSpecs += 1 + case types.SpecStateSkipped: + summary.NumberOfSkippedSpecs += 1 + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted: + summary.NumberOfFailedSpecs += 1 + case types.SpecStatePassed: + summary.NumberOfPassedSpecs += 1 + if spec.NumAttempts > 1 { + summary.NumberOfFlakedSpecs += 1 + } + } + } + } + + summary.SuiteSucceeded = report.SuiteSucceeded + summary.RunTime = report.RunTime + + reporter.SuiteDidEnd(summary) +} + +func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure { + if spec.Failure.IsZero() { + return types.DeprecatedSpecFailure{} + } + + index := 0 + switch spec.Failure.FailureNodeContext { + case types.FailureNodeInContainer: + index = spec.Failure.FailureNodeContainerIndex + case types.FailureNodeAtTopLevel: + index = -1 + case types.FailureNodeIsLeafNode: + index = len(spec.ContainerHierarchyTexts) - 1 + if spec.LeafNodeText != "" { + index += 1 + } + } + + return types.DeprecatedSpecFailure{ + Message: spec.Failure.Message, + Location: spec.Failure.Location, + ForwardedPanic: spec.Failure.ForwardedPanic, + ComponentIndex: index, + ComponentType: spec.Failure.FailureNodeType, + ComponentCodeLocation: spec.Failure.FailureNodeLocation, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go new file mode 100644 index 00000000..5d3e8db9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -0,0 +1,69 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/types" +) + +// GenerateJSONReport produces a JSON-formatted report at the passed in destination +func GenerateJSONReport(report types.Report, destination string) error { + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } + f, err := os.Create(destination) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode([]types.Report{ + report, + }) + if err != nil { + return err + } + return nil +} + +// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to decode but reports on them via the returned messages []string +func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + allReports := []types.Report{} + for _, source := range sources { + reports := []types.Report{} + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = json.Unmarshal(data, &reports) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + allReports = 
append(allReports, reports...) + } + + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } + f, err := os.Create(destination) + if err != nil { + return messages, err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(allReports) + if err != nil { + return messages, err + } + return messages, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go new file mode 100644 index 00000000..2a3215b5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -0,0 +1,389 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/ + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "os" + "path" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +type JunitReportConfig struct { + // Spec States for which no timeline should be emitted for system-err + // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs + OmitTimelinesForSpecState types.SpecState + + // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags + OmitFailureMessageAttr bool + + //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out + OmitCapturedStdOutErr bool + + // Enable OmitSpecLabels to prevent labels from appearing in the spec name + OmitSpecLabels bool + + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name + OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool +} + +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending and/or skipped + Disabled int `xml:"disabled,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all test suites + Time float64 `xml:"time,attr"` + + //The set of all test suites + TestSuites []JUnitTestSuite `xml:"testsuite"` +} + +type JUnitTestSuite struct { + // Name maps onto the description of the test suite - maps onto Report.SuiteDescription + Name string `xml:"name,attr"` + // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath + Package string `xml:"package,attr"` + // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending + Disabled int `xml:"disabled,attr"` + // Skipped maps onto specs that are skipped + Skipped int `xml:"skipped,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute the test suite - maps onto Report.RunTime + Time float64 `xml:"time,attr"` + // Timestamp is the ISO 8601 
formatted start-time of the suite - maps onto Report.StartTime + Timestamp string `xml:"timestamp,attr"` + + //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs + Properties JUnitProperties `xml:"properties"` + + //TestCases capture the individual specs + TestCases []JUnitTestCase `xml:"testcase"` +} + +type JUnitProperties struct { + Properties []JUnitProperty `xml:"property"` +} + +func (jup JUnitProperties) WithName(name string) string { + for _, property := range jup.Properties { + if property.Name == name { + return property.Value + } + } + return "" +} + +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + +type JUnitTestCase struct { + // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" + Name string `xml:"name,attr"` + // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription + Classname string `xml:"classname,attr"` + // Status maps onto the string representation of SpecReport.State + Status string `xml:"status,attr"` + // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime + Time float64 `xml:"time,attr"` + // Owner is the owner of the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. + Owner string `xml:"owner,attr,omitempty"` + //Skipped is populated with a message if the test was skipped or pending + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + //Error is populated if the test panicked or was interrupted + Error *JUnitError `xml:"error,omitempty"` + //Failure is populated if the test failed + Failure *JUnitFailure `xml:"failure,omitempty"` + //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr + SystemOut string `xml:"system-out,omitempty"` + //SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput + SystemErr string `xml:"system-err,omitempty"` +} + +type JUnitSkipped struct { + // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) + Message string `xml:"message,attr"` +} + +type JUnitError struct { + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" + Message string `xml:"message,attr"` + //Type is one of "panicked" or "interrupted" + Type string `xml:"type,attr"` + //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines + Description string `xml:",chardata"` +} + +type JUnitFailure struct { + //Message maps onto the failure message - equivalent to SpecReport.Failure.Message + Message string `xml:"message,attr"` + //Type is "failed" + Type string `xml:"type,attr"` + //Description maps onto the location and stack trace of the failure + Description string `xml:",chardata"` +} + +func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { + suite := JUnitTestSuite{ + Name: report.SuiteDescription, + Package: report.SuitePath, 
+ Time: report.RunTime.Seconds(), + Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), + Properties: JUnitProperties{ + Properties: []JUnitProperty{ + {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, + {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, + {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, + {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, + {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, + {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, + {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, + {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, + {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, + {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, + {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, + {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, + {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, + {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, + }, + }, + } + for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 && !config.OmitSpecLabels { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } + name = strings.TrimSpace(name) + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + Owner: owner, + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: spec.Failure.Message, + Type: "interrupted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 
+ case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = &JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } + f, err := os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) + } + + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure. 
These are visible in the timeline\n") + } + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + return RenderTimeline(spec, true) +} + +func RenderTimeline(spec types.SpecReport, noColor bool) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + return spec.CapturedStdOutErr +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 100644 index 00000000..5e726c46 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,29 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) + EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 00000000..e990ad82 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,105 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } + f, err := 
os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateInterrupted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateAborted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + merged = append(merged, data...) 
+ } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 00000000..57e87517 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,159 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" + "sync" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +type codeLocationLocator struct { + pcs map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if !c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var clLocator = &codeLocationLocator{ + pcs: map[uintptr]bool{}, + helpers: map[string]bool{}, + lock: &sync.Mutex{}, +} + +// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. +func MarkAsHelper(optionalSkip ...int) { + skip := 1 + if len(optionalSkip) > 0 { + skip += optionalSkip[0] + } + pc, _, _, ok := runtime.Caller(skip) + if ok { + clLocator.addHelper(pc) + } +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func NewCodeLocation(skip int) CodeLocation { + return clLocator.getCodeLocation(skip + 1) +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + cl := clLocator.getCodeLocation(skip + 1) + cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) + return cl +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. 
Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.MatchString(stack[i*2+1]) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 00000000..cef273ee --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,761 @@ +/* +Ginkgo accepts a number of configuration options. +These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailFast bool + FlakeAttempts int + MustPassRepeatedly int + DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration + Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually + OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + GracePeriod: 30 * time.Second, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool + GithubOutput bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose 
{ + return VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{} +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.NumCPU() + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.NumCPU() +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). +// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +// Configuration that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: 
"{{cyan}}", Heading: "Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. 
You can pass multiple --source-root flags."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. 
For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter."}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. 
You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) + flags = flags.WithPrefix("ginkgo") + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + 
{KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. 
The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to race or, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. 
When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting. 
Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { + // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + + args := []string{"test", "-c", "-o", destination, packageToBuild} + goArgs, err := GenerateFlagArgs( + 
GoBuildFlags, + map[string]interface{}{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) + return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]interface{}{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]interface{}{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
+ + bindings := map[string]interface{}{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]interface{}{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 00000000..17922304 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. +*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + +func (s 
DeprecatedSpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go new file mode 100644 index 00000000..e2519f67 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -0,0 +1,177 @@ +package types + +import ( + "os" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type Deprecation struct { + Message string + DocLink string + Version string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", + DocLink: "removed-custom-reporters", + Version: "1.16.0", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + Version: "1.16.0", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", + DocLink: "removed-measure", + Version: "1.16.3", + } +} + +func (d deprecations) ParallelNode() Deprecation { + return Deprecation{ + Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", + DocLink: "renamed-ginkgoparallelnode", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + Version: "1.16.0", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. 
Use 'ginkgo unfocus' instead.", + Version: "1.16.0", + } +} + +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") + if deprecation.Version != "" && ackVersion != "" { + ack := ParseSemVer(ackVersion) + version := ParseSemVer(deprecation.Version) + if ack.GreaterThanOrEqualTo(version) { + return + } + } + + d.lock.Lock() + defer d.lock.Unlock() + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) + return out +} + +type SemVer struct { + Major int + Minor int + Patch int +} + +func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { + return (s.Major > o.Major) || + (s.Major == o.Major && s.Minor > o.Minor) || + (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) +} + +func ParseSemVer(semver string) SemVer { + out := SemVer{} + semver = strings.TrimFunc(semver, func(r rune) bool { + return !(unicode.IsNumber(r) || r == '.') + }) + components := strings.Split(semver, ".") + if len(components) > 0 { + out.Major, _ = strconv.Atoi(components[0]) + } + if len(components) > 1 { + out.Minor, _ = strconv.Atoi(components[1]) + } + if len(components) > 2 { + out.Patch, _ = strconv.Atoi(components[2]) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 00000000..1d96ae02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import 
"encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 00000000..6bb72d00 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,639 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. 
If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. + +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. 
+ +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. + +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. 
You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed `+mustGet+`. +You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. 
You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { + return GinkgoError{ + Heading: "ContinueOnFailure not decorating an outermost Ordered Container", + Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). 
You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. 
regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain any of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "No parameters have been passed to the Table Function", + Message: "The Table Function expected at least 1 parameter", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, 
actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { + return GinkgoError{ + Heading: "Contexts cannot be used in subtree tables", + Message: "You've defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." 
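For orientation: the parallel-configuration error constructors that follow are the ones VetConfig (added earlier in this patch) returns when the ginkgo.parallel.* settings are inconsistent. A minimal sketch of how they render, assuming only that the vendored github.com/onsi/ginkgo/v2/types package is importable; the main wrapper below is illustrative and not part of the patch:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	// GinkgoErrors is the exported singleton declared in the vendored errors.go;
	// each constructor returns a GinkgoError whose Error() string combines the
	// heading, the shared parallel-configuration message, and a documentation link.
	errs := []error{
		types.GinkgoErrors.InvalidParallelTotalConfiguration(),
		types.GinkgoErrors.InvalidParallelProcessConfiguration(),
		types.GinkgoErrors.MissingParallelHostConfiguration(),
	}
	for _, err := range errs {
		fmt.Println(err.Error())
	}
}

VetConfig emits these errors when ParallelTotal is below 1, ParallelProcess falls outside 1..ParallelTotal, or ParallelHost is unset for a multi-process run, so a misconfigured parallel invocation surfaces one of these headings.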
+ +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration. The default is 30s.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... 
which would you like?", + } +} + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 00000000..cc21df71 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations []CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 00000000..de69f302 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,490 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string + AlwaysExport bool +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags 
...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." + flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings interface{} + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //suppress all output as Ginkgo is responsible for formatting usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //on user feedback + f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr := value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), 
deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): + if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += 
f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out += f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } + } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + if len(flag.Name) == 1 { + hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
+func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 00000000..b0d3b651 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,358 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + } + return -1 +} + +func (l lfToken) String() string { + switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" 
+ case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | " + tn.value + } + out += ">" + return out +} + +func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +func tokenize(input string) func() (*treeNode, error) { + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.IndexRune(cutset, runes[j]) >= 0 { + break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) && runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + switch 
runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/") + i += n + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + if input == "" { + return func(_ []string) bool { return true }, nil + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", 
GinkgoErrors.InvalidLabel(label, cl) + } + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 00000000..7b1524b5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,190 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel +type ReportEntryValue struct { + raw interface{} //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value interface{}) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() interface{} { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the representation at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + return json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Location captures the location of the AddReportEntry call + Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. 
+type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() interface{} { + return entry.Value.GetRawValue() +} + +func (entry ReportEntry) GetTimelineLocation() TimelineLocation { + return entry.TimelineLocation +} + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. 
JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 00000000..aae69b04 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,914 @@ +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +// Report captures information about a Ginkgo test run +type Report struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsuccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused + //(i.e an `FIt` or an `FDescribe` + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For example, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. 
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite + SpecReports SpecReports +} + +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +// to form a complete final report. +func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + copy(reports, report.SpecReports) + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc. 
+ State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. + // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + // Repeated specs can be retried with the use of the MustPassRepeatedly decorator + NumAttempts int + + // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + MaxFlakeAttempts int + + // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator + MaxMustPassRepeatedly int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport + + // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. 
+ AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + if len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + if len(report.AdditionalFailures) > 0 { + out.AdditionalFailures = report.AdditionalFailures + } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. +func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +// Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels. 
+func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +// FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +// LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + +type SpecReports []SpecReport + +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + +// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts. 
+func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts +func (reports SpecReports) CountOfRepeatedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// TimelineLocation captures the location of an event in the spec's timeline +type TimelineLocation struct { + //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream + Offset int `json:",omitempty"` + + //Order is the order of the event with respect to other events. The absolute value of Order + //is irrelevant. All that matters is that an event with a lower Order occurs before ane vent with a higher Order + Order int `json:",omitempty"` + + Time time.Time +} + +// TimelineEvent represent an event on the timeline +// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it +type TimelineEvent interface { + GetTimelineLocation() TimelineLocation +} + +type Timeline []TimelineEvent + +func (t Timeline) Len() int { return len(t) } +func (t Timeline) Less(i, j int) bool { + return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order +} +func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t Timeline) WithoutHiddenReportEntries() Timeline { + out := Timeline{} + for _, event := range t { + if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever { + continue + } + out = append(out, event) + } + return out +} + +func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline { + out := Timeline{} + for _, event := range t { + if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() { + continue + } + out = append(out, event) + } + return out +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if the user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + TimelineLocation TimelineLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. 
FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. + // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` +} + +func (f Failure) IsZero() bool { + return f.Message == "" && (f.Location == CodeLocation{}) +} + +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return fncEnumSupport.MarshJSON(uint(fnc)) +} + +// AdditionalFailure capturs any additional failures that occur after the initial failure of a psec +// these typically occur in clean up nodes after the spec has failed. 
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted + SpecStateTimedout +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` + + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` + + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` + + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` + + AdditionalReports []string `json:",omitempty"` + + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` + + Goroutines []Goroutine `json:",omitempty"` +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || 
goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + out := pr + out.CapturedGinkgoWriterOutput = "" + return out +} + +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportBeforeSuite + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "DeferCleanup", + uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + 
uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} + +/* +SpecEvent captures a vareity of events that can occur when specs run. See SpecEventType for the list of available events. +*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", +}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 00000000..52cc3abc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.17.3" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fece58b1..89230f19 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ 
-1,3 +1,38 @@ +## 1.33.0 + +### Features + +`Receive` now accepts `Receive(<POINTER>, <MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer. + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb] +- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596] + +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. + +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + ## 1.31.0 ### Features diff --git a/vendor/github.com/onsi/gomega/ghttp/handlers.go b/vendor/github.com/onsi/gomega/ghttp/handlers.go index b2d1c2c4..eb99514e 100644 --- a/vendor/github.com/onsi/gomega/ghttp/handlers.go +++ b/vendor/github.com/onsi/gomega/ghttp/handlers.go @@ -11,11 +11,13 @@ import ( "reflect" "strings" - "github.com/golang/protobuf/proto" "github.com/onsi/gomega" . "github.com/onsi/gomega" "github.com/onsi/gomega/internal/gutil" "github.com/onsi/gomega/types" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/runtime/protoiface" ) type GHTTPWithGomega struct { @@ -193,7 +195,7 @@ func (g GHTTPWithGomega) VerifyFormKV(key string, values ...string) http.Handler // representation of the passed message. // // VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf -func (g GHTTPWithGomega) VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc { +func (g GHTTPWithGomega) VerifyProtoRepresenting(expected protoiface.MessageV1) http.HandlerFunc { return CombineHandlers( g.VerifyContentType("application/x-protobuf"), func(w http.ResponseWriter, req *http.Request) { @@ -204,13 +206,14 @@ func (g GHTTPWithGomega) VerifyProtoRepresenting(expected proto.Message) http.Ha expectedType := reflect.TypeOf(expected) actualValuePtr := reflect.New(expectedType.Elem()) - actual, ok := actualValuePtr.Interface().(proto.Message) - g.gomega.Expect(ok).Should(BeTrueBecause("Message value should be a proto.Message")) + actual, ok := actualValuePtr.Interface().(protoiface.MessageV1) + g.gomega.Expect(ok).Should(BeTrueBecause("Message value should be a protoiface.MessageV1")) - err = proto.Unmarshal(body, actual) + err = proto.Unmarshal(body, protoadapt.MessageV2Of(actual)) g.gomega.Expect(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf") - g.gomega.Expect(actual).Should(Equal(expected), "ProtoBuf Mismatch") + g.gomega.Expect(proto.Equal(protoadapt.MessageV2Of(expected), protoadapt.MessageV2Of(actual))).
+ Should(BeTrue(), "ProtoBuf Mismatch") }, ) } @@ -328,9 +331,9 @@ func (g GHTTPWithGomega) RespondWithJSONEncodedPtr(statusCode *int, object inter // containing the protobuf serialization of the provided message. // // Also, RespondWithProto can be given an optional http.Header. The headers defined therein will be added to the response headers. -func (g GHTTPWithGomega) RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc { +func (g GHTTPWithGomega) RespondWithProto(statusCode int, message protoadapt.MessageV1, optionalHeader ...http.Header) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - data, err := proto.Marshal(message) + data, err := proto.Marshal(protoadapt.MessageV2Of(message)) g.gomega.Expect(err).ShouldNot(HaveOccurred()) var headers http.Header @@ -397,7 +400,7 @@ func VerifyFormKV(key string, values ...string) http.HandlerFunc { return NewGHTTPWithGomega(gomega.Default).VerifyFormKV(key, values...) } -func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc { +func VerifyProtoRepresenting(expected protoiface.MessageV1) http.HandlerFunc { return NewGHTTPWithGomega(gomega.Default).VerifyProtoRepresenting(expected) } @@ -417,6 +420,6 @@ func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHead return NewGHTTPWithGomega(gomega.Default).RespondWithJSONEncodedPtr(statusCode, object, optionalHeader...) } -func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc { +func RespondWithProto(statusCode int, message protoadapt.MessageV1, optionalHeader ...http.Header) http.HandlerFunc { return NewGHTTPWithGomega(gomega.Default).RespondWithProto(statusCode, message, optionalHeader...) } diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 4f7ab279..1980a63c 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.31.0" +const GOMEGA_VERSION = "1.33.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 8860d677..7ef27dc9 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher { // // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
// -// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing)) // Expect(myThing.Sprocket).Should(Equal("foo")) // Expect(myThing.IsValid()).Should(BeTrue()) +// +// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, +// you can pass a pointer to a variable of the approriate type first, and second a matcher: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - return &matchers.ReceiveMatcher{ - Arg: arg, + Args: args, } } diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb91..4e389785 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 1936a2ba..948164ea 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -10,7 +11,7 @@ import ( ) type ReceiveMatcher struct { - Arg interface{} + Args []interface{} receivedValue reflect.Value channelClosed bool } @@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + var resultReference interface{} + + // Valid arg formats are as follows, always with optional POINTER before + // optional MATCHER: + // - Receive() + // - Receive(POINTER) + // - Receive(MATCHER) + // - Receive(POINTER, MATCHER) + args := matcher.Args + if len(args) > 0 { + arg := args[0] + _, isSubMatcher := arg.(omegaMatcher) + if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr { + // Consume optional POINTER arg first, if it ain't no matcher ;) + resultReference = arg + args = args[1:] + } + } + if len(args) > 0 { + arg := args[0] + subMatcher, hasSubMatcher = arg.(omegaMatcher) if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a 
pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } + // At this point we assume the dev user wanted to assign a received + // value, so [POINTER,]MATCHER. + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1)) } + // Consume optional MATCHER arg. + args = args[1:] + } + if len(args) > 0 { + // If there are still args present, reject all. + return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher") } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ @@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } if hasSubMatcher { - if didReceive { - matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) + if !didReceive { + return false, nil } - return false, nil + matcher.receivedValue = value + if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match { + return match, err + } + // if we received a match, then fall through in order to handle an + // optional assignment of the received value to the specified reference. } if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) + if resultReference != nil { + outValue := reflect.ValueOf(resultReference) if value.Type().AssignableTo(outValue.Elem().Type()) { outValue.Elem().Set(value) @@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro outValue.Elem().Set(value.Elem()) return true, nil } else { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1)) } } @@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { @@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { diff --git a/vendor/github.com/openzipkin/zipkin-go/LICENSE b/vendor/github.com/openzipkin/zipkin-go/LICENSE new file mode 100644 index 00000000..2ff72246 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2017 The OpenZipkin Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go new file mode 100644 index 00000000..0cb5a96f --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go @@ -0,0 +1,130 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package idgenerator contains several Span and Trace ID generators which can be +used by the Zipkin tracer. Additional third party generators can be plugged in +if they adhere to the IDGenerator interface. +*/ +package idgenerator + +import ( + "math/rand" + "sync" + "time" + + "github.com/openzipkin/zipkin-go/model" +) + +var ( + seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) + // NewSource returns a new pseudo-random Source seeded with the given value. + // Unlike the default Source used by top-level functions, this source is not + // safe for concurrent use by multiple goroutines. Hence the need for a mutex. + seededIDLock sync.Mutex +) + +// IDGenerator interface can be used to provide the Zipkin Tracer with custom +// implementations to generate Span and Trace IDs. +type IDGenerator interface { + SpanID(traceID model.TraceID) model.ID // Generates a new Span ID + TraceID() model.TraceID // Generates a new Trace ID +} + +// NewRandom64 returns an ID Generator which can generate 64 bit trace and span +// id's +func NewRandom64() IDGenerator { + return &randomID64{} +} + +// NewRandom128 returns an ID Generator which can generate 128 bit trace and 64 +// bit span id's +func NewRandom128() IDGenerator { + return &randomID128{} +} + +// NewRandomTimestamped generates 128 bit time sortable traceid's and 64 bit +// spanid's. +func NewRandomTimestamped() IDGenerator { + return &randomTimestamped{} +} + +// randomID64 can generate 64 bit traceid's and 64 bit spanid's. +type randomID64 struct{} + +func (r *randomID64) TraceID() (id model.TraceID) { + seededIDLock.Lock() + id = model.TraceID{ + Low: uint64(seededIDGen.Int63()), + } + seededIDLock.Unlock() + return +} + +func (r *randomID64) SpanID(traceID model.TraceID) (id model.ID) { + if !traceID.Empty() { + return model.ID(traceID.Low) + } + seededIDLock.Lock() + id = model.ID(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} + +// randomID128 can generate 128 bit traceid's and 64 bit spanid's. +type randomID128 struct{} + +func (r *randomID128) TraceID() (id model.TraceID) { + seededIDLock.Lock() + id = model.TraceID{ + High: uint64(seededIDGen.Int63()), + Low: uint64(seededIDGen.Int63()), + } + seededIDLock.Unlock() + return +} + +func (r *randomID128) SpanID(traceID model.TraceID) (id model.ID) { + if !traceID.Empty() { + return model.ID(traceID.Low) + } + seededIDLock.Lock() + id = model.ID(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} + +// randomTimestamped can generate 128 bit time sortable traceid's compatible +// with AWS X-Ray and 64 bit spanid's. 
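The idgenerator package comment above notes that additional third-party generators can be plugged in as long as they satisfy the IDGenerator interface. A minimal sketch of such a custom generator, assuming a caller-supplied seed (the type and constructor names here are illustrative, not part of the vendored package):

package idgen

import (
	"math/rand"
	"sync"

	"github.com/openzipkin/zipkin-go/model"
)

// fixedSeedGenerator is a hypothetical IDGenerator producing reproducible
// 64-bit trace and span IDs from a caller-supplied seed (handy in tests).
type fixedSeedGenerator struct {
	mu  sync.Mutex
	rnd *rand.Rand
}

func NewFixedSeed(seed int64) *fixedSeedGenerator {
	return &fixedSeedGenerator{rnd: rand.New(rand.NewSource(seed))}
}

func (g *fixedSeedGenerator) TraceID() model.TraceID {
	g.mu.Lock()
	defer g.mu.Unlock()
	return model.TraceID{Low: uint64(g.rnd.Int63())}
}

func (g *fixedSeedGenerator) SpanID(traceID model.TraceID) model.ID {
	if !traceID.Empty() {
		// Reuse the trace ID for the root span, mirroring the built-in generators.
		return model.ID(traceID.Low)
	}
	g.mu.Lock()
	defer g.mu.Unlock()
	return model.ID(g.rnd.Int63())
}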
+type randomTimestamped struct{} + +func (t *randomTimestamped) TraceID() (id model.TraceID) { + seededIDLock.Lock() + id = model.TraceID{ + High: uint64(time.Now().Unix()<<32) + uint64(seededIDGen.Int31()), + Low: uint64(seededIDGen.Int63()), + } + seededIDLock.Unlock() + return +} + +func (t *randomTimestamped) SpanID(traceID model.TraceID) (id model.ID) { + if !traceID.Empty() { + return model.ID(traceID.Low) + } + seededIDLock.Lock() + id = model.ID(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go new file mode 100644 index 00000000..02d09fb1 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go @@ -0,0 +1,60 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "time" +) + +// ErrValidTimestampRequired error +var ErrValidTimestampRequired = errors.New("valid annotation timestamp required") + +// Annotation associates an event that explains latency with a timestamp. +type Annotation struct { + Timestamp time.Time + Value string +} + +// MarshalJSON implements custom JSON encoding +func (a *Annotation) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Timestamp int64 `json:"timestamp"` + Value string `json:"value"` + }{ + Timestamp: a.Timestamp.Round(time.Microsecond).UnixNano() / 1e3, + Value: a.Value, + }) +} + +// UnmarshalJSON implements custom JSON decoding +func (a *Annotation) UnmarshalJSON(b []byte) error { + type Alias Annotation + annotation := &struct { + TimeStamp uint64 `json:"timestamp"` + *Alias + }{ + Alias: (*Alias)(a), + } + if err := json.Unmarshal(b, &annotation); err != nil { + return err + } + if annotation.TimeStamp < 1 { + return ErrValidTimestampRequired + } + a.Timestamp = time.Unix(0, int64(annotation.TimeStamp)*1e3) + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/doc.go b/vendor/github.com/openzipkin/zipkin-go/model/doc.go new file mode 100644 index 00000000..4cae4e07 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/doc.go @@ -0,0 +1,23 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package model contains the Zipkin V2 model which is used by the Zipkin Go +tracer implementation. 
+ +Third party instrumentation libraries can use the model and transport packages +found in this Zipkin Go library to directly interface with the Zipkin Server or +Zipkin Collectors without the need to use the tracer implementation itself. +*/ +package model diff --git a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go new file mode 100644 index 00000000..48e2afd6 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go @@ -0,0 +1,50 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "net" + "strings" +) + +// Endpoint holds the network context of a node in the service graph. +type Endpoint struct { + ServiceName string + IPv4 net.IP + IPv6 net.IP + Port uint16 +} + +// MarshalJSON exports our Endpoint into the correct format for the Zipkin V2 API. +func (e Endpoint) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + ServiceName string `json:"serviceName,omitempty"` + IPv4 net.IP `json:"ipv4,omitempty"` + IPv6 net.IP `json:"ipv6,omitempty"` + Port uint16 `json:"port,omitempty"` + }{ + strings.ToLower(e.ServiceName), + e.IPv4, + e.IPv6, + e.Port, + }) +} + +// Empty returns if all Endpoint properties are empty / unspecified. +func (e *Endpoint) Empty() bool { + return e == nil || + (e.ServiceName == "" && e.Port == 0 && len(e.IPv4) == 0 && len(e.IPv6) == 0) +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/kind.go b/vendor/github.com/openzipkin/zipkin-go/model/kind.go new file mode 100644 index 00000000..d247c020 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/kind.go @@ -0,0 +1,27 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Kind clarifies context of timestamp, duration and remoteEndpoint in a span. 
+type Kind string + +// Available Kind values +const ( + Undetermined Kind = "" + Client Kind = "CLIENT" + Server Kind = "SERVER" + Producer Kind = "PRODUCER" + Consumer Kind = "CONSUMER" +) diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span.go b/vendor/github.com/openzipkin/zipkin-go/model/span.go new file mode 100644 index 00000000..cf30bfac --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/span.go @@ -0,0 +1,161 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "strings" + "time" +) + +// unmarshal errors +var ( + ErrValidTraceIDRequired = errors.New("valid traceId required") + ErrValidIDRequired = errors.New("valid span id required") + ErrValidDurationRequired = errors.New("valid duration required") +) + +// BaggageFields holds the interface for consumers needing to interact with +// the fields in application logic. +type BaggageFields interface { + // Get returns the values for a field identified by its key. + Get(key string) []string + // Add adds the provided values to a header designated by key. If not + // accepted by the baggage implementation, it will return false. + Add(key string, value ...string) bool + // Set sets the provided values to a header designated by key. If not + // accepted by the baggage implementation, it will return false. + Set(key string, value ...string) bool + // Delete removes the field data designated by key. If not accepted by the + // baggage implementation, it will return false. + Delete(key string) bool + // Iterate will iterate over the available fields and for each one it will + // trigger the callback function. + Iterate(f func(key string, values []string)) +} + +// SpanContext holds the context of a Span. +type SpanContext struct { + TraceID TraceID `json:"traceId"` + ID ID `json:"id"` + ParentID *ID `json:"parentId,omitempty"` + Debug bool `json:"debug,omitempty"` + Sampled *bool `json:"-"` + Err error `json:"-"` + Baggage BaggageFields `json:"-"` +} + +// SpanModel structure. +// +// If using this library to instrument your application you will not need to +// directly access or modify this representation. The SpanModel is exported for +// use cases involving 3rd party Go instrumentation libraries desiring to +// export data to a Zipkin server using the Zipkin V2 Span model. +type SpanModel struct { + SpanContext + Name string `json:"name,omitempty"` + Kind Kind `json:"kind,omitempty"` + Timestamp time.Time `json:"-"` + Duration time.Duration `json:"-"` + Shared bool `json:"shared,omitempty"` + LocalEndpoint *Endpoint `json:"localEndpoint,omitempty"` + RemoteEndpoint *Endpoint `json:"remoteEndpoint,omitempty"` + Annotations []Annotation `json:"annotations,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +// MarshalJSON exports our Model into the correct format for the Zipkin V2 API. 
+func (s SpanModel) MarshalJSON() ([]byte, error) { + type Alias SpanModel + + var timestamp int64 + if !s.Timestamp.IsZero() { + if s.Timestamp.Unix() < 1 { + // Zipkin does not allow Timestamps before Unix epoch + return nil, ErrValidTimestampRequired + } + timestamp = s.Timestamp.Round(time.Microsecond).UnixNano() / 1e3 + } + + if s.Duration < time.Microsecond { + if s.Duration < 0 { + // negative duration is not allowed and signals a timing logic error + return nil, ErrValidDurationRequired + } else if s.Duration > 0 { + // sub microsecond durations are reported as 1 microsecond + s.Duration = 1 * time.Microsecond + } + } else { + // Duration will be rounded to nearest microsecond representation. + // + // NOTE: Duration.Round() is not available in Go 1.8 which we still support. + // To handle microsecond resolution rounding we'll add 500 nanoseconds to + // the duration. When truncated to microseconds in the call to marshal, it + // will be naturally rounded. See TestSpanDurationRounding in span_test.go + s.Duration += 500 * time.Nanosecond + } + + s.Name = strings.ToLower(s.Name) + + if s.LocalEndpoint.Empty() { + s.LocalEndpoint = nil + } + + if s.RemoteEndpoint.Empty() { + s.RemoteEndpoint = nil + } + + return json.Marshal(&struct { + T int64 `json:"timestamp,omitempty"` + D int64 `json:"duration,omitempty"` + Alias + }{ + T: timestamp, + D: s.Duration.Nanoseconds() / 1e3, + Alias: (Alias)(s), + }) +} + +// UnmarshalJSON imports our Model from a Zipkin V2 API compatible span +// representation. +func (s *SpanModel) UnmarshalJSON(b []byte) error { + type Alias SpanModel + span := &struct { + T uint64 `json:"timestamp,omitempty"` + D uint64 `json:"duration,omitempty"` + *Alias + }{ + Alias: (*Alias)(s), + } + if err := json.Unmarshal(b, &span); err != nil { + return err + } + if s.ID < 1 { + return ErrValidIDRequired + } + if span.T > 0 { + s.Timestamp = time.Unix(0, int64(span.T)*1e3) + } + s.Duration = time.Duration(span.D*1e3) * time.Nanosecond + if s.LocalEndpoint.Empty() { + s.LocalEndpoint = nil + } + + if s.RemoteEndpoint.Empty() { + s.RemoteEndpoint = nil + } + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go new file mode 100644 index 00000000..acd72ea7 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go @@ -0,0 +1,44 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// ID type +type ID uint64 + +// String outputs the 64-bit ID as hex string. +func (i ID) String() string { + return fmt.Sprintf("%016x", uint64(i)) +} + +// MarshalJSON serializes an ID type (SpanID, ParentSpanID) to HEX. +func (i ID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", i.String())), nil +} + +// UnmarshalJSON deserializes an ID type (SpanID, ParentSpanID) from HEX. 
+func (i *ID) UnmarshalJSON(b []byte) (err error) { + var id uint64 + if len(b) < 3 { + return nil + } + id, err = strconv.ParseUint(string(b[1:len(b)-1]), 16, 64) + *i = ID(id) + return err +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go new file mode 100644 index 00000000..dca65535 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go @@ -0,0 +1,75 @@ +// Copyright 2022 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// TraceID is a 128 bit number internally stored as 2x uint64 (high & low). +// In case of 64 bit traceIDs, the value can be found in Low. +type TraceID struct { + High uint64 + Low uint64 +} + +// Empty returns if TraceID has zero value. +func (t TraceID) Empty() bool { + return t.Low == 0 && t.High == 0 +} + +// String outputs the 128-bit traceID as hex string. +func (t TraceID) String() string { + if t.High == 0 { + return fmt.Sprintf("%016x", t.Low) + } + return fmt.Sprintf("%016x%016x", t.High, t.Low) +} + +// TraceIDFromHex returns the TraceID from a hex string. +func TraceIDFromHex(h string) (t TraceID, err error) { + if len(h) > 16 { + if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil { + return + } + t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64) + return + } + t.Low, err = strconv.ParseUint(h, 16, 64) + return +} + +// MarshalJSON custom JSON serializer to export the TraceID in the required +// zero padded hex representation. +func (t TraceID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", t.String())), nil +} + +// UnmarshalJSON custom JSON deserializer to retrieve the traceID from the hex +// encoded representation. +func (t *TraceID) UnmarshalJSON(traceID []byte) error { + if len(traceID) < 3 { + return ErrValidTraceIDRequired + } + // A valid JSON string is encoded wrapped in double quotes. We need to trim + // these before converting the hex payload. + tID, err := TraceIDFromHex(string(traceID[1 : len(traceID)-1])) + if err != nil { + return err + } + *t = tID + return nil +} diff --git a/vendor/github.com/vito/go-sse/LICENSE.md b/vendor/github.com/vito/go-sse/LICENSE.md new file mode 100644 index 00000000..5c304d1a --- /dev/null +++ b/vendor/github.com/vito/go-sse/LICENSE.md @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/vito/go-sse/sse/errors.go b/vendor/github.com/vito/go-sse/sse/errors.go new file mode 100644 index 00000000..5cf74bc0 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/errors.go @@ -0,0 +1,5 @@ +package sse + +import "errors" + +var ErrSourceClosed = errors.New("source closed") diff --git a/vendor/github.com/vito/go-sse/sse/event.go b/vendor/github.com/vito/go-sse/sse/event.go new file mode 100644 index 00000000..396a2079 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/event.go @@ -0,0 +1,71 @@ +package sse + +import ( + "bytes" + "fmt" + "io" + "time" +) + +type Event struct { + ID string + Name string + Data []byte + Retry time.Duration +} + +func (event Event) Encode() string { + enc := fmt.Sprintf("id: %s\nevent: %s\n", event.ID, event.Name) + + if event.Retry != 0 { + enc += fmt.Sprintf("retry: %d\n", event.Retry/1000/1000) + } + + for _, line := range bytes.Split(event.Data, []byte("\n")) { + if len(line) == 0 { + enc += "data\n" + } else { + enc += fmt.Sprintf("data: %s\n", line) + } + } + + enc += "\n" + + return enc +} + +func (event Event) Write(destination io.Writer) error { + _, err := fmt.Fprintf(destination, "id: %s\n", event.ID) + if err != nil { + return err + } + + _, err = fmt.Fprintf(destination, "event: %s\n", event.Name) + if err != nil { + return err + } + + if event.Retry != 0 { + _, err = fmt.Fprintf(destination, "retry: %d\n", event.Retry/1000/1000) + if err != nil { + return err + } + } + + for _, line := range bytes.Split(event.Data, []byte("\n")) { + var err error + + if len(line) == 0 { + _, err = fmt.Fprintf(destination, "data\n") + } else { + _, err = fmt.Fprintf(destination, "data: %s\n", line) + } + + if err != nil { + return err + } + } + + _, err = fmt.Fprintf(destination, "\n") + return err +} diff --git a/vendor/github.com/vito/go-sse/sse/event_source.go b/vendor/github.com/vito/go-sse/sse/event_source.go new file mode 100644 index 00000000..04e0abf3 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/event_source.go @@ -0,0 +1,268 @@ +package sse + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" +) + +type BadResponseError struct { + Response *http.Response +} + +func (err BadResponseError) Error() string { + return fmt.Sprintf("bad response from event source: %s", err.Response.Status) +} + +// EventSource behaves like the EventSource interface from the Server-Sent +// Events spec implemented in many browsers. See +// http://www.w3.org/TR/eventsource/#the-eventsource-interface for details. +// +// To use, optionally call Connect(), and then call Next(). If Next() is called +// prior to Connect(), it will connect for you. +// +// Alternatively, create a Config struct instance and call Connect() and then call +// Next(). +// +// Next() is often called asynchronously in a loop so that the event source can +// be closed. Next() will block on reading from the server. +// +// If Close() is called while reading an event, Next() will return early, and +// subsequent calls to Next() will return early. To read new events, Connect() +// must be called. +// +// If an EOF is received, Next() returns io.EOF, and subsequent calls to Next() +// will return early. To read new events, Connect() must be called. 
+type EventSource struct { + client *http.Client + createRequest func() *http.Request + + currentReadCloser *ReadCloser + lastEventID string + lock sync.Mutex + + closeOnce *sync.Once + closed chan struct{} + + retryInterval time.Duration + maxRetries uint16 +} + +type RetryParams struct { + RetryInterval time.Duration + MaxRetries uint16 +} + +type Config struct { + Client *http.Client + RetryParams RetryParams + RequestCreator func() *http.Request +} + +func (c *Config) Connect() (*EventSource, error) { + client := c.Client + if client == nil { + client = http.DefaultClient + } + source := createEventSource(client, c.RetryParams, c.RequestCreator) + + readCloser, err := source.establishConnection() + if err != nil { + return nil, err + } + + source.currentReadCloser = readCloser + + return source, nil +} + +func NewEventSource(client *http.Client, defaultRetryInterval time.Duration, requestCreator func() *http.Request) *EventSource { + retryParams := RetryParams{ + RetryInterval: defaultRetryInterval, + } + return createEventSource(client, retryParams, requestCreator) +} + +func createEventSource(client *http.Client, retryParams RetryParams, requestCreator func() *http.Request) *EventSource { + return &EventSource{ + client: client, + createRequest: requestCreator, + + closeOnce: new(sync.Once), + closed: make(chan struct{}), + retryInterval: retryParams.RetryInterval, + maxRetries: retryParams.MaxRetries, + } +} + +func Connect(client *http.Client, defaultRetryInterval time.Duration, requestCreator func() *http.Request) (*EventSource, error) { + source := NewEventSource(client, defaultRetryInterval, requestCreator) + + readCloser, err := source.establishConnection() + if err != nil { + return nil, err + } + + source.currentReadCloser = readCloser + + return source, nil +} + +func (source *EventSource) Next() (Event, error) { + select { + case <-source.closed: + return Event{}, ErrSourceClosed + default: + } + + for { + readCloser, err := source.ensureReadCloser() + if err != nil { + return Event{}, err + } + + event, err := readCloser.Next() + if err == nil { + source.lastEventID = event.ID + + if event.Retry != 0 { + source.retryInterval = event.Retry + } + + return event, nil + } + + if err == io.EOF { + return Event{}, err + } + + readCloser.Close() + + if err := source.waitForRetry(); err != nil { + return Event{}, err + } + } + + panic("unreachable") +} + +func (source *EventSource) Close() error { + source.lock.Lock() + defer source.lock.Unlock() + + source.closeOnce.Do(func() { + close(source.closed) + }) + + if source.currentReadCloser != nil { + err := source.currentReadCloser.Close() + if err != nil { + return err + } + + source.currentReadCloser = nil + } + + return nil +} + +func (source *EventSource) ensureReadCloser() (*ReadCloser, error) { + source.lock.Lock() + + if source.currentReadCloser == nil { + source.lock.Unlock() + + newReadCloser, err := source.establishConnection() + if err != nil { + return nil, err + } + + source.lock.Lock() + + select { + case <-source.closed: + source.lock.Unlock() + newReadCloser.Close() + return nil, ErrSourceClosed + + default: + source.currentReadCloser = newReadCloser + } + } + + readCloser := source.currentReadCloser + + source.lock.Unlock() + + return readCloser, nil +} + +func (source *EventSource) establishConnection() (*ReadCloser, error) { + var connectionRetries uint16 + for { + req := source.createRequest() + + req.Header.Set("Last-Event-ID", source.lastEventID) + + res, err := source.client.Do(req) + if err != nil { + 
connectionRetries++ + if !source.shouldRetry(connectionRetries) { + return nil, err + } + err := source.waitForRetry() + if err != nil { + return nil, err + } + + continue + } + + switch res.StatusCode { + case http.StatusOK: + return NewReadCloser(res.Body), nil + + // reestablish the connection + case http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + res.Body.Close() + + err := source.waitForRetry() + if err != nil { + return nil, err + } + + continue + + // fail the connection + default: + res.Body.Close() + + return nil, BadResponseError{ + Response: res, + } + } + } +} + +func (source *EventSource) waitForRetry() error { + source.lock.Lock() + source.currentReadCloser = nil + source.lock.Unlock() + + select { + case <-time.After(source.retryInterval): + return nil + case <-source.closed: + return ErrSourceClosed + } +} + +func (source *EventSource) shouldRetry(retries uint16) bool { + return source.maxRetries == 0 || + (source.maxRetries > 0 && retries <= source.maxRetries) +} diff --git a/vendor/github.com/vito/go-sse/sse/read_closer.go b/vendor/github.com/vito/go-sse/sse/read_closer.go new file mode 100644 index 00000000..066d7c92 --- /dev/null +++ b/vendor/github.com/vito/go-sse/sse/read_closer.go @@ -0,0 +1,122 @@ +package sse + +import ( + "bufio" + "bytes" + "errors" + "io" + "strconv" + "time" +) + +type ReadCloser struct { + lastID string + + buf *bufio.Reader + closeSource func() error + closed bool +} + +func NewReadCloser(source io.ReadCloser) *ReadCloser { + return &ReadCloser{ + closeSource: func() error { return source.Close() }, + buf: bufio.NewReader(source), + } +} + +var alreadyClosedError = errors.New("ReadCloser already closed") + +func (rc *ReadCloser) Close() error { + if rc.closed { + return alreadyClosedError + } + + rc.closed = true + + return rc.closeSource() +} + +func (rc *ReadCloser) Next() (Event, error) { + var event Event + + // event ID defaults to last ID per the spec + event.ID = rc.lastID + + // if an empty id is explicitly given, it sets the value and resets the last + // id; track its presence with a bool to distinguish between zero-value + idPresent := false + + prefix := []byte{} + for { + line, isPrefix, err := rc.buf.ReadLine() + if err != nil { + return Event{}, err + } + + line = append(prefix, line...) + + if isPrefix { + prefix = line + continue + } else { + prefix = []byte{} + } + + // empty line; dispatch event + if len(line) == 0 { + if len(event.Data) == 0 { + // event had no data; skip it per the spec + continue + } + + if idPresent { + // record last ID + rc.lastID = event.ID + } + + // trim terminating linebreak + event.Data = event.Data[0 : len(event.Data)-1] + + // dispatch event + return event, nil + } + + if line[0] == ':' { + // comment; skip + continue + } + + var field, value string + + segments := bytes.SplitN(line, []byte(":"), 2) + if len(segments) == 1 { + // line with no colon is just the field, with empty value + field = string(segments[0]) + } else { + field = string(segments[0]) + value = string(segments[1]) + } + + if len(value) > 0 { + // trim only a single leading space + if value[0] == ' ' { + value = value[1:] + } + } + + switch field { + case "id": + idPresent = true + event.ID = value + case "event": + event.Name = value + case "data": + event.Data = append(event.Data, []byte(value+"\n")...) 
+ case "retry": + retryInMS, err := strconv.Atoi(value) + if err == nil { + event.Retry = time.Duration(retryInMS) * time.Millisecond + } + } + } +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae2..c672ccf6 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 +224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 VADDUWM V10, 
V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. @@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index c2dfe326..e2ae4f89 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -426,6 +426,35 @@ func (l ServerAuthError) Error() string { return "[" + strings.Join(errs, ", ") + "]" } +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. 
+ Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + // ErrNoAuth is the error value returned if no // authentication method has been passed yet. This happens as a normal // part of the authentication loop, since the client first tries @@ -439,8 +468,18 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err var perms *Permissions authFailures := 0 + noneAuthCount := 0 var authErrs []error var displayedBanner bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. + authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } userAuthLoop: for { @@ -471,6 +510,11 @@ userAuthLoop: return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) } + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + s.user = userAuthReq.User if !displayedBanner && config.BannerCallback != nil { @@ -491,20 +535,18 @@ userAuthLoop: switch userAuthReq.Method { case "none": - if config.NoClientAuth { + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. + if config.NoClientAuth && !partialSuccessReturned { if config.NoClientAuthCallback != nil { perms, authErr = config.NoClientAuthCallback(s) } else { authErr = nil } } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } case "password": - if config.PasswordCallback == nil { + if authConfig.PasswordCallback == nil { authErr = errors.New("ssh: password auth not configured") break } @@ -518,17 +560,17 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } - perms, authErr = config.PasswordCallback(s, password) + perms, authErr = authConfig.PasswordCallback(s, password) case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { + if authConfig.KeyboardInteractiveCallback == nil { authErr = errors.New("ssh: keyboard-interactive auth not configured") break } prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) case "publickey": - if config.PublicKeyCallback == nil { + if authConfig.PublicKeyCallback == nil { authErr = errors.New("ssh: publickey auth not configured") break } @@ -562,11 +604,18 @@ userAuthLoop: if !ok { candidate.user = s.user candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" 
{ + if err := checkSourceAddress( s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } } cache.add(candidate) } @@ -578,8 +627,8 @@ userAuthLoop: if len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - - if candidate.result == nil { + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { okMsg := userAuthPubKeyOkMsg{ Algo: algo, PubKey: pubKeyData, @@ -629,11 +678,11 @@ userAuthLoop: perms = candidate.perms } case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { + if authConfig.GSSAPIWithMICConfig == nil { authErr = errors.New("ssh: gssapi-with-mic auth not configured") break } - gssapiConfig := config.GSSAPIWithMICConfig + gssapiConfig := authConfig.GSSAPIWithMICConfig userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) if err != nil { return nil, parseError(msgUserAuthRequest) @@ -689,49 +738,70 @@ userAuthLoop: break userAuthLoop } - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. - // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue + var failureMsg userAuthFailureMsg + + if partialSuccess, ok := authErr.(*PartialSuccessError); ok { + // After a partial success error we don't allow changing the user + // name and execute the NoClientAuthCallback. + partialSuccessReturned = true + + // In case a partial success is returned, the server may send + // a new set of authentication methods. + authConfig = partialSuccess.Next + + // Reset pubkey cache, as the new PublicKeyCallback might + // accept a different set of public keys. + cache = pubKeyCache{} + + // Send back a partial success message to the user. + failureMsg.PartialSuccess = true + } else { + // Allow initial attempt of 'none' without penalty. + if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 { + authFailures++ + } + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. 
+ // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. + continue + } } - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { + if authConfig.PasswordCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "password") } - if config.PublicKeyCallback != nil { + if authConfig.PublicKeyCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "publickey") } - if config.KeyboardInteractiveCallback != nil { + if authConfig.KeyboardInteractiveCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") } if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + return nil, errors.New("ssh: no authentication methods available") } if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e597..7f602ffd 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c..27c41b6f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1520,6 +1520,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031..6525c62f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys 
GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6..d8cb71db 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82..5c6035dd 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func 
DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func 
SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 00000000..47a9a541 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". 
+ // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. +// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. 
+ for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 00000000..1fc1de0b --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +// New returns an Inspector for the specified syntax trees. +func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int // index of corresponding push or pop event +} + +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The complete traversal sequence is determined by ast.Inspect. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + f(ev.node) + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The complete traversal sequence is determined by ast.Inspect. 
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
+ mask := maskOf(types)
+ for i := 0; i < len(in.events); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ pop := ev.index
+ if ev.typ&mask != 0 {
+ if !f(ev.node, true) {
+ i = pop + 1 // jump to corresponding pop + 1
+ continue
+ }
+ }
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them.
+ i = pop
+ continue
+ }
+ } else {
+ // pop
+ push := ev.index
+ if in.events[push].typ&mask != 0 {
+ f(ev.node, false)
+ }
+ }
+ i++
+ }
+}
+
+// WithStack visits nodes in a similar manner to Nodes, but it
+// supplies each call to f an additional argument, the current
+// traversal stack. The stack's first element is the outermost node,
+// an *ast.File; its last is the innermost, n.
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
+ mask := maskOf(types)
+ var stack []ast.Node
+ for i := 0; i < len(in.events); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ pop := ev.index
+ stack = append(stack, ev.node)
+ if ev.typ&mask != 0 {
+ if !f(ev.node, true, stack) {
+ i = pop + 1
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ }
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them.
+ i = pop
+ continue
+ }
+ } else {
+ // pop
+ push := ev.index
+ if in.events[push].typ&mask != 0 {
+ f(ev.node, false, stack)
+ }
+ stack = stack[:len(stack)-1]
+ }
+ i++
+ }
+}
+
+// traverse builds the table of events representing a traversal.
+func traverse(files []*ast.File) []event {
+ // Preallocate approximate number of events
+ // based on source file extent.
+ // This makes traverse faster by 4x (!).
+ var extent int
+ for _, f := range files {
+ extent += int(f.End() - f.Pos())
+ }
+ // This estimate is based on the net/http package.
+ capacity := extent * 33 / 100
+ if capacity > 1e6 {
+ capacity = 1e6 // impose some reasonable maximum
+ }
+ events := make([]event, 0, capacity)
+
+ var stack []event
+ stack = append(stack, event{}) // include an extra event so file nodes have a parent
+ for _, f := range files {
+ ast.Inspect(f, func(n ast.Node) bool {
+ if n != nil {
+ // push
+ ev := event{
+ node: n,
+ typ: 0, // temporarily used to accumulate type bits of subtree
+ index: len(events), // push event temporarily holds own index
+ }
+ stack = append(stack, ev)
+ events = append(events, ev)
+ } else {
+ // pop
+ top := len(stack) - 1
+ ev := stack[top]
+ typ := typeOf(ev.node)
+ push := ev.index
+ parent := top - 1
+
+ events[push].typ = typ // set type of push
+ stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
+ events[push].index = len(events) // make push refer to pop
+
+ stack = stack[:top]
+ events = append(events, ev)
+ }
+ return true
+ })
+ }
+
+ return events
+}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
new file mode 100644
index 00000000..2a872f89
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -0,0 +1,227 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file defines func typeOf(ast.Node) uint64.
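Before the typeOf details below, a minimal usage sketch of the traversal API defined above: build an Inspector from parsed files, then run a type-filtered Preorder pass. The source snippet and the *ast.FuncDecl filter are illustrative only.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"

	"golang.org/x/tools/go/ast/inspector"
)

const src = `package p

func add(a, b int) int { return a + b }
func sub(a, b int) int { return a - b }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	// One construction pays for many subsequent traversals.
	in := inspector.New([]*ast.File{f})

	// The typed nil pointer acts purely as a type filter: only *ast.FuncDecl
	// nodes reach the callback, thanks to the bit-mask filtering described above.
	in.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
		fn := n.(*ast.FuncDecl)
		fmt.Println("found function:", fn.Name.Name)
	})
}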
+// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import ( + "go/ast" + "math" +) + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nIndexListExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.IndexListExpr: + return 1 << nIndexListExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if nodes == nil { + return math.MaxUint64 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 00000000..ea276d15 --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoadapt bridges the original and new proto APIs. 
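The single-bit encoding above is what keeps filtering cheap: a filter is the OR of the bits for the requested node types, a match is a single AND, and a nil filter matches everything. A standalone sketch of that idea follows; the kind constants and maskFor helper are illustrative stand-ins, not the unexported typeOf/maskOf from this file.

package main

import "fmt"

// Illustrative node kinds; the real file assigns one distinct bit per ast.Node type.
const (
	kindIdent uint64 = 1 << iota
	kindCallExpr
	kindFuncDecl
)

// maskFor mirrors the maskOf idea: an empty filter matches every kind.
func maskFor(kinds ...uint64) uint64 {
	if len(kinds) == 0 {
		return ^uint64(0) // match all kinds
	}
	var mask uint64
	for _, k := range kinds {
		mask |= k
	}
	return mask
}

func main() {
	// Filter that accepts identifiers and function declarations only.
	mask := maskFor(kindIdent, kindFuncDecl)

	for name, kind := range map[string]uint64{
		"Ident": kindIdent, "CallExpr": kindCallExpr, "FuncDecl": kindFuncDecl,
	} {
		fmt.Printf("%-8s matches filter: %t\n", name, kind&mask != 0)
	}
}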
+package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1Of(m MessageV2) MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2Of converts a v1 message to a v2 message. +// It returns nil if m is nil. +func MessageV2Of(m MessageV1) MessageV2 { + return protoimpl.X.ProtoMessageV2Of(m) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b6bc4925..f6e5095c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,17 @@ +# code.cloudfoundry.org/bbs v0.0.0-20240521125508-20d3971ce31b +## explicit +code.cloudfoundry.org/bbs +code.cloudfoundry.org/bbs/encryption +code.cloudfoundry.org/bbs/events +code.cloudfoundry.org/bbs/format +code.cloudfoundry.org/bbs/models +code.cloudfoundry.org/bbs/trace # code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4 ## explicit; go 1.20 code.cloudfoundry.org/bytefmt +# code.cloudfoundry.org/cfhttp/v2 v2.1.0 +## explicit; go 1.19 +code.cloudfoundry.org/cfhttp/v2 # code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 ## explicit code.cloudfoundry.org/cfnetworking-cli-api/cfnetworking @@ -75,6 +86,12 @@ code.cloudfoundry.org/jsonry code.cloudfoundry.org/jsonry/internal/errorcontext code.cloudfoundry.org/jsonry/internal/path code.cloudfoundry.org/jsonry/internal/tree +# code.cloudfoundry.org/lager/v3 v3.0.3 +## explicit; go 1.19 +code.cloudfoundry.org/lager/v3 +code.cloudfoundry.org/lager/v3/internal/truncate +# code.cloudfoundry.org/locket v0.0.0-20240521151413-b344fdd15d03 +## explicit # code.cloudfoundry.org/tlsconfig v0.0.0-20230612153104-23c0622de227 ## explicit; go 1.19 code.cloudfoundry.org/tlsconfig @@ -139,13 +156,22 @@ github.com/fatih/color # github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/go-logr/logr v1.3.0 +# github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/slogr +# github.com/go-sql-driver/mysql v1.8.1 +## explicit; go 1.18 +# github.com/go-task/slim-sprig/v3 v3.0.0 +## explicit; go 1.20 +github.com/go-task/slim-sprig/v3 +# github.com/go-test/deep v1.1.0 +## explicit; go 1.16 # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 @@ -166,6 +192,9 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource +# github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 +## explicit; go 1.19 +github.com/google/pprof/profile # github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 ## explicit; go 1.19 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule @@ -174,6 +203,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities # github.com/imdario/mergo v0.3.15 ## explicit; go 1.13 github.com/imdario/mergo +# github.com/jackc/pgx/v5 v5.6.0 +## explicit; go 1.20 # 
github.com/jessevdk/go-flags v1.5.0 ## explicit; go 1.15 github.com/jessevdk/go-flags @@ -234,7 +265,25 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.31.0 +# github.com/onsi/ginkgo/v2 v2.17.3 +## explicit; go 1.20 +github.com/onsi/ginkgo/v2/config +github.com/onsi/ginkgo/v2/formatter +github.com/onsi/ginkgo/v2/ginkgo +github.com/onsi/ginkgo/v2/ginkgo/build +github.com/onsi/ginkgo/v2/ginkgo/command +github.com/onsi/ginkgo/v2/ginkgo/generators +github.com/onsi/ginkgo/v2/ginkgo/internal +github.com/onsi/ginkgo/v2/ginkgo/labels +github.com/onsi/ginkgo/v2/ginkgo/outline +github.com/onsi/ginkgo/v2/ginkgo/run +github.com/onsi/ginkgo/v2/ginkgo/unfocus +github.com/onsi/ginkgo/v2/ginkgo/watch +github.com/onsi/ginkgo/v2/internal/interrupt_handler +github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/reporters +github.com/onsi/ginkgo/v2/types +# github.com/onsi/gomega v1.33.0 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -247,6 +296,10 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types +# github.com/openzipkin/zipkin-go v0.4.2 +## explicit; go 1.18 +github.com/openzipkin/zipkin-go/idgenerator +github.com/openzipkin/zipkin-go/model # github.com/prometheus/client_golang v1.17.0 ## explicit; go 1.19 github.com/prometheus/client_golang/prometheus @@ -285,7 +338,10 @@ github.com/tedsuo/rata ## explicit; go 1.12 github.com/vito/go-interact/interact github.com/vito/go-interact/interact/terminal -# golang.org/x/crypto v0.21.0 +# github.com/vito/go-sse v1.0.0 +## explicit; go 1.12 +github.com/vito/go-sse/sse +# golang.org/x/crypto v0.22.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -296,7 +352,7 @@ golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.23.0 +# golang.org/x/net v0.24.0 ## explicit; go 1.18 golang.org/x/net/html golang.org/x/net/html/atom @@ -313,13 +369,13 @@ golang.org/x/net/trace ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.18.0 +# golang.org/x/sys v0.19.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.18.0 +# golang.org/x/term v0.19.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -347,6 +403,10 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate +# golang.org/x/tools v0.20.0 +## explicit; go 1.19 +golang.org/x/tools/cover +golang.org/x/tools/go/ast/inspector # google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine/internal @@ -447,6 +507,7 @@ google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/protoadapt google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry
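Finally, the protoadapt package added to the vendor tree above bridges the legacy and current proto message interfaces. A minimal sketch of round-tripping a message through it; durationpb is used here only because its generated code happens to satisfy both interfaces.

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Generated messages from the current API also implement the legacy
	// interface, so a *durationpb.Duration can stand in for a v1 message.
	var legacy protoadapt.MessageV1 = durationpb.New(0)

	// Upgrade to the v2 interface to gain access to protobuf reflection.
	msg := protoadapt.MessageV2Of(legacy)
	fmt.Println(msg.ProtoReflect().Descriptor().FullName()) // google.protobuf.Duration

	// And convert back for code that still expects the legacy interface.
	_ = protoadapt.MessageV1Of(msg)
}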