From d8464f951f917fdc21e12c04fdd76ab7c51623da Mon Sep 17 00:00:00 2001 From: jkito Date: Wed, 4 Dec 2024 13:31:43 +0530 Subject: [PATCH] update obfsvpn go module to v1.3.1-0.20241121155258-e6b06efc4456 --- go.mod | 11 +- go.sum | 26 +- .../0xacab.org/leap/obfsvpn/client/client.go | 244 +- .../0xacab.org/leap/obfsvpn/obfsvpn/dialer.go | 2 +- vendor/0xacab.org/leap/obfsvpn/obfsvpn/hop.go | 6 +- .../leap/obfsvpn/obfsvpn/listener.go | 33 +- .../0xacab.org/leap/obfsvpn/obfsvpn/quic.go | 92 + .../go-task/slim-sprig/.editorconfig | 14 + .../go-task/slim-sprig/.gitattributes | 1 + .../github.com/go-task/slim-sprig/.gitignore | 2 + .../go-task/slim-sprig/CHANGELOG.md | 364 + .../github.com/go-task/slim-sprig/LICENSE.txt | 19 + .../github.com/go-task/slim-sprig/README.md | 73 + .../go-task/slim-sprig/Taskfile.yml | 12 + .../github.com/go-task/slim-sprig/crypto.go | 24 + vendor/github.com/go-task/slim-sprig/date.go | 152 + .../github.com/go-task/slim-sprig/defaults.go | 163 + vendor/github.com/go-task/slim-sprig/dict.go | 118 + vendor/github.com/go-task/slim-sprig/doc.go | 19 + .../go-task/slim-sprig/functions.go | 317 + vendor/github.com/go-task/slim-sprig/list.go | 464 + .../github.com/go-task/slim-sprig/network.go | 12 + .../github.com/go-task/slim-sprig/numeric.go | 228 + .../github.com/go-task/slim-sprig/reflect.go | 28 + vendor/github.com/go-task/slim-sprig/regex.go | 83 + .../github.com/go-task/slim-sprig/strings.go | 189 + vendor/github.com/go-task/slim-sprig/url.go | 66 + vendor/github.com/google/pprof/AUTHORS | 7 + vendor/github.com/google/pprof/CONTRIBUTORS | 16 + vendor/github.com/google/pprof/LICENSE | 202 + .../github.com/google/pprof/profile/encode.go | 588 + .../github.com/google/pprof/profile/filter.go | 274 + .../github.com/google/pprof/profile/index.go | 64 + .../pprof/profile/legacy_java_profile.go | 315 + .../google/pprof/profile/legacy_profile.go | 1228 ++ .../github.com/google/pprof/profile/merge.go | 667 + .../google/pprof/profile/profile.go | 856 + .../github.com/google/pprof/profile/proto.go | 367 + .../github.com/google/pprof/profile/prune.go | 194 + vendor/github.com/onsi/ginkgo/v2/LICENSE | 20 + .../onsi/ginkgo/v2/config/deprecated.go | 69 + .../ginkgo/v2/formatter/colorable_others.go | 41 + .../ginkgo/v2/formatter/colorable_windows.go | 809 + .../onsi/ginkgo/v2/formatter/formatter.go | 230 + .../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 + .../onsi/ginkgo/v2/ginkgo/command/command.go | 50 + .../onsi/ginkgo/v2/ginkgo/command/program.go | 182 + .../ginkgo/generators/boostrap_templates.go | 48 + .../v2/ginkgo/generators/bootstrap_command.go | 133 + .../v2/ginkgo/generators/generate_command.go | 264 + .../ginkgo/generators/generate_templates.go | 43 + .../v2/ginkgo/generators/generators_common.go | 76 + .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 161 + .../ginkgo/internal/profiles_and_reports.go | 237 + .../onsi/ginkgo/v2/ginkgo/internal/run.go | 355 + .../ginkgo/v2/ginkgo/internal/test_suite.go | 284 + .../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 + .../v2/ginkgo/internal/verify_version.go | 54 + .../ginkgo/v2/ginkgo/labels/labels_command.go | 123 + .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 + .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 300 + .../onsi/ginkgo/v2/ginkgo/outline/import.go | 65 + .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 110 + .../v2/ginkgo/outline/outline_command.go | 98 + .../onsi/ginkgo/v2/ginkgo/run/run_command.go | 232 + .../v2/ginkgo/unfocus/unfocus_command.go | 186 + .../onsi/ginkgo/v2/ginkgo/watch/delta.go | 22 + 
.../ginkgo/v2/ginkgo/watch/delta_tracker.go | 75 + .../ginkgo/v2/ginkgo/watch/dependencies.go | 92 + .../ginkgo/v2/ginkgo/watch/package_hash.go | 108 + .../ginkgo/v2/ginkgo/watch/package_hashes.go | 85 + .../onsi/ginkgo/v2/ginkgo/watch/suite.go | 87 + .../ginkgo/v2/ginkgo/watch/watch_command.go | 192 + .../interrupt_handler/interrupt_handler.go | 177 + .../sigquit_swallower_unix.go | 15 + .../sigquit_swallower_windows.go | 8 + .../parallel_support/client_server.go | 72 + .../internal/parallel_support/http_client.go | 169 + .../internal/parallel_support/http_server.go | 242 + .../internal/parallel_support/rpc_client.go | 136 + .../internal/parallel_support/rpc_server.go | 75 + .../parallel_support/server_handler.go | 234 + .../ginkgo/v2/reporters/default_reporter.go | 759 + .../v2/reporters/deprecated_reporter.go | 149 + .../onsi/ginkgo/v2/reporters/json_report.go | 69 + .../onsi/ginkgo/v2/reporters/junit_report.go | 376 + .../onsi/ginkgo/v2/reporters/reporter.go | 29 + .../ginkgo/v2/reporters/teamcity_report.go | 105 + .../onsi/ginkgo/v2/types/code_location.go | 159 + .../github.com/onsi/ginkgo/v2/types/config.go | 758 + .../onsi/ginkgo/v2/types/deprecated_types.go | 141 + .../ginkgo/v2/types/deprecation_support.go | 177 + .../onsi/ginkgo/v2/types/enum_support.go | 43 + .../github.com/onsi/ginkgo/v2/types/errors.go | 630 + .../onsi/ginkgo/v2/types/file_filter.go | 106 + .../github.com/onsi/ginkgo/v2/types/flags.go | 489 + .../onsi/ginkgo/v2/types/label_filter.go | 358 + .../onsi/ginkgo/v2/types/report_entry.go | 190 + .../github.com/onsi/ginkgo/v2/types/types.go | 914 + .../onsi/ginkgo/v2/types/version.go | 3 + vendor/github.com/quic-go/quic-go/.gitignore | 18 + .../github.com/quic-go/quic-go/.golangci.yml | 45 + .../github.com/quic-go/quic-go/Changelog.md | 109 + vendor/github.com/quic-go/quic-go/LICENSE | 21 + vendor/github.com/quic-go/quic-go/README.md | 50 + vendor/github.com/quic-go/quic-go/SECURITY.md | 19 + .../github.com/quic-go/quic-go/buffer_pool.go | 92 + vendor/github.com/quic-go/quic-go/client.go | 251 + .../github.com/quic-go/quic-go/closed_conn.go | 57 + vendor/github.com/quic-go/quic-go/codecov.yml | 16 + vendor/github.com/quic-go/quic-go/config.go | 128 + .../quic-go/quic-go/conn_id_generator.go | 138 + .../quic-go/quic-go/conn_id_manager.go | 214 + .../github.com/quic-go/quic-go/connection.go | 2333 +++ .../quic-go/quic-go/connection_logging.go | 173 + .../quic-go/quic-go/connection_timer.go | 51 + .../quic-go/quic-go/crypto_stream.go | 83 + .../quic-go/quic-go/crypto_stream_manager.go | 77 + .../quic-go/quic-go/datagram_queue.go | 137 + vendor/github.com/quic-go/quic-go/errors.go | 75 + .../quic-go/quic-go/frame_sorter.go | 237 + vendor/github.com/quic-go/quic-go/framer.go | 240 + .../github.com/quic-go/quic-go/interface.go | 371 + .../internal/ackhandler/ack_eliciting.go | 20 + .../quic-go/internal/ackhandler/ackhandler.go | 24 + .../quic-go/internal/ackhandler/ecn.go | 296 + .../quic-go/internal/ackhandler/frame.go | 21 + .../quic-go/internal/ackhandler/interfaces.go | 53 + .../quic-go/internal/ackhandler/mockgen.go | 9 + .../quic-go/internal/ackhandler/packet.go | 55 + .../ackhandler/packet_number_generator.go | 84 + .../ackhandler/received_packet_handler.go | 125 + .../ackhandler/received_packet_history.go | 141 + .../ackhandler/received_packet_tracker.go | 220 + .../quic-go/internal/ackhandler/send_mode.go | 46 + .../ackhandler/sent_packet_handler.go | 928 + .../ackhandler/sent_packet_history.go | 182 + .../quic-go/internal/congestion/bandwidth.go | 25 + 
.../quic-go/internal/congestion/clock.go | 18 + .../quic-go/internal/congestion/cubic.go | 213 + .../internal/congestion/cubic_sender.go | 316 + .../internal/congestion/hybrid_slow_start.go | 112 + .../quic-go/internal/congestion/interface.go | 28 + .../quic-go/internal/congestion/pacer.go | 84 + .../flowcontrol/base_flow_controller.go | 127 + .../flowcontrol/connection_flow_controller.go | 104 + .../quic-go/internal/flowcontrol/interface.go | 43 + .../flowcontrol/stream_flow_controller.go | 149 + .../quic-go/internal/handshake/aead.go | 90 + .../internal/handshake/cipher_suite.go | 104 + .../internal/handshake/crypto_setup.go | 634 + .../internal/handshake/header_protector.go | 134 + .../quic-go/internal/handshake/hkdf.go | 29 + .../internal/handshake/initial_aead.go | 71 + .../quic-go/internal/handshake/interface.go | 117 + .../quic-go/internal/handshake/retry.go | 63 + .../internal/handshake/session_ticket.go | 54 + .../internal/handshake/token_generator.go | 120 + .../internal/handshake/token_protector.go | 74 + .../internal/handshake/updatable_aead.go | 332 + .../internal/protocol/connection_id.go | 116 + .../internal/protocol/encryption_level.go | 30 + .../quic-go/internal/protocol/key_phase.go | 36 + .../internal/protocol/packet_number.go | 57 + .../quic-go/internal/protocol/params.go | 180 + .../quic-go/internal/protocol/perspective.go | 26 + .../quic-go/internal/protocol/protocol.go | 152 + .../quic-go/internal/protocol/stream.go | 76 + .../quic-go/internal/protocol/version.go | 114 + .../quic-go/internal/qerr/error_codes.go | 87 + .../quic-go/quic-go/internal/qerr/errors.go | 139 + .../quic-go/internal/qtls/cipher_suite.go | 52 + .../internal/qtls/client_session_cache.go | 70 + .../quic-go/quic-go/internal/qtls/conn.go | 21 + .../quic-go/quic-go/internal/qtls/qtls.go | 150 + .../internal/utils/buffered_write_closer.go | 26 + .../internal/utils/linkedlist/README.md | 6 + .../internal/utils/linkedlist/linkedlist.go | 264 + .../quic-go/quic-go/internal/utils/log.go | 131 + .../quic-go/quic-go/internal/utils/rand.go | 29 + .../internal/utils/ringbuffer/ringbuffer.go | 96 + .../quic-go/internal/utils/rtt_stats.go | 131 + .../quic-go/quic-go/internal/utils/timer.go | 57 + .../quic-go/internal/wire/ack_frame.go | 275 + .../quic-go/internal/wire/ack_range.go | 14 + .../internal/wire/connection_close_frame.go | 75 + .../quic-go/internal/wire/crypto_frame.go | 97 + .../internal/wire/data_blocked_frame.go | 29 + .../quic-go/internal/wire/datagram_frame.go | 85 + .../quic-go/internal/wire/extended_header.go | 166 + .../quic-go/internal/wire/frame_parser.go | 197 + .../internal/wire/handshake_done_frame.go | 17 + .../quic-go/quic-go/internal/wire/header.go | 302 + .../quic-go/internal/wire/interface.go | 11 + .../quic-go/quic-go/internal/wire/log.go | 74 + .../quic-go/internal/wire/max_data_frame.go | 33 + .../internal/wire/max_stream_data_frame.go | 43 + .../internal/wire/max_streams_frame.go | 49 + .../internal/wire/new_connection_id_frame.go | 80 + .../quic-go/internal/wire/new_token_frame.go | 43 + .../internal/wire/path_challenge_frame.go | 32 + .../internal/wire/path_response_frame.go | 32 + .../quic-go/internal/wire/ping_frame.go | 17 + .../quic-go/quic-go/internal/wire/pool.go | 33 + .../internal/wire/reset_stream_frame.go | 55 + .../wire/retire_connection_id_frame.go | 30 + .../quic-go/internal/wire/short_header.go | 62 + .../internal/wire/stop_sending_frame.go | 45 + .../wire/stream_data_blocked_frame.go | 42 + .../quic-go/internal/wire/stream_frame.go | 188 + 
.../internal/wire/streams_blocked_frame.go | 49 + .../internal/wire/transport_parameters.go | 532 + .../internal/wire/version_negotiation.go | 53 + .../quic-go/logging/connection_tracer.go | 271 + .../quic-go/quic-go/logging/frame.go | 66 + .../quic-go/quic-go/logging/interface.go | 111 + .../quic-go/quic-go/logging/packet_header.go | 24 + .../quic-go/quic-go/logging/tracer.go | 59 + .../quic-go/quic-go/logging/types.go | 128 + vendor/github.com/quic-go/quic-go/mockgen.go | 69 + .../quic-go/quic-go/mtu_discoverer.go | 229 + .../github.com/quic-go/quic-go/multiplexer.go | 75 + vendor/github.com/quic-go/quic-go/oss-fuzz.sh | 42 + .../quic-go/quic-go/packet_handler_map.go | 255 + .../quic-go/quic-go/packet_packer.go | 914 + .../quic-go/quic-go/packet_unpacker.go | 223 + .../quic-go/quic-go/quicvarint/io.go | 68 + .../quic-go/quic-go/quicvarint/varint.go | 164 + .../quic-go/quic-go/receive_stream.go | 377 + .../quic-go/quic-go/retransmission_queue.go | 180 + .../github.com/quic-go/quic-go/send_conn.go | 103 + .../github.com/quic-go/quic-go/send_queue.go | 103 + .../github.com/quic-go/quic-go/send_stream.go | 593 + vendor/github.com/quic-go/quic-go/server.go | 925 + vendor/github.com/quic-go/quic-go/stream.go | 162 + .../github.com/quic-go/quic-go/streams_map.go | 334 + .../quic-go/quic-go/streams_map_incoming.go | 195 + .../quic-go/quic-go/streams_map_outgoing.go | 230 + vendor/github.com/quic-go/quic-go/sys_conn.go | 117 + .../quic-go/quic-go/sys_conn_buffers.go | 68 + .../quic-go/quic-go/sys_conn_buffers_write.go | 70 + .../github.com/quic-go/quic-go/sys_conn_df.go | 22 + .../quic-go/quic-go/sys_conn_df_darwin.go | 74 + .../quic-go/quic-go/sys_conn_df_linux.go | 42 + .../quic-go/quic-go/sys_conn_df_windows.go | 54 + .../quic-go/quic-go/sys_conn_helper_darwin.go | 38 + .../quic-go/sys_conn_helper_freebsd.go | 33 + .../quic-go/quic-go/sys_conn_helper_linux.go | 156 + .../quic-go/sys_conn_helper_nonlinux.go | 10 + .../quic-go/quic-go/sys_conn_no_oob.go | 21 + .../quic-go/quic-go/sys_conn_oob.go | 331 + .../quic-go/quic-go/sys_conn_windows.go | 42 + .../github.com/quic-go/quic-go/token_store.go | 116 + vendor/github.com/quic-go/quic-go/tools.go | 8 + .../github.com/quic-go/quic-go/transport.go | 554 + vendor/go.uber.org/mock/AUTHORS | 12 + vendor/go.uber.org/mock/LICENSE | 202 + .../go.uber.org/mock/mockgen/generic_go118.go | 130 + .../mock/mockgen/generic_notgo118.go | 41 + vendor/go.uber.org/mock/mockgen/mockgen.go | 883 + .../go.uber.org/mock/mockgen/model/model.go | 533 + vendor/go.uber.org/mock/mockgen/parse.go | 805 + vendor/go.uber.org/mock/mockgen/reflect.go | 256 + vendor/go.uber.org/mock/mockgen/version.go | 31 + vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + vendor/golang.org/x/exp/rand/exp.go | 221 + vendor/golang.org/x/exp/rand/normal.go | 156 + vendor/golang.org/x/exp/rand/rand.go | 372 + vendor/golang.org/x/exp/rand/rng.go | 91 + vendor/golang.org/x/exp/rand/zipf.go | 77 + vendor/golang.org/x/mod/LICENSE | 27 + vendor/golang.org/x/mod/PATENTS | 22 + .../x/mod/internal/lazyregexp/lazyre.go | 78 + vendor/golang.org/x/mod/modfile/print.go | 184 + vendor/golang.org/x/mod/modfile/read.go | 958 + vendor/golang.org/x/mod/modfile/rule.go | 1666 ++ vendor/golang.org/x/mod/modfile/work.go | 285 + vendor/golang.org/x/mod/module/module.go | 841 + vendor/golang.org/x/mod/module/pseudo.go | 250 + vendor/golang.org/x/mod/semver/semver.go | 401 + vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/go/ast/astutil/enclosing.go 
| 634 + .../x/tools/go/ast/astutil/imports.go | 485 + .../x/tools/go/ast/astutil/rewrite.go | 486 + .../golang.org/x/tools/go/ast/astutil/util.go | 18 + .../x/tools/go/ast/inspector/inspector.go | 220 + .../x/tools/go/ast/inspector/typeof.go | 227 + vendor/golang.org/x/tools/imports/forward.go | 77 + .../x/tools/internal/event/core/event.go | 85 + .../x/tools/internal/event/core/export.go | 70 + .../x/tools/internal/event/core/fast.go | 77 + .../golang.org/x/tools/internal/event/doc.go | 7 + .../x/tools/internal/event/event.go | 127 + .../x/tools/internal/event/keys/keys.go | 564 + .../x/tools/internal/event/keys/standard.go | 22 + .../x/tools/internal/event/keys/util.go | 21 + .../x/tools/internal/event/label/label.go | 215 + .../x/tools/internal/gocommand/invoke.go | 470 + .../x/tools/internal/gocommand/vendor.go | 163 + .../x/tools/internal/gocommand/version.go | 71 + .../x/tools/internal/gopathwalk/walk.go | 337 + .../x/tools/internal/imports/fix.go | 1892 ++ .../x/tools/internal/imports/imports.go | 354 + .../x/tools/internal/imports/mod.go | 841 + .../x/tools/internal/imports/mod_cache.go | 331 + .../x/tools/internal/imports/sortimports.go | 297 + .../x/tools/internal/stdlib/manifest.go | 17320 ++++++++++++++++ .../x/tools/internal/stdlib/stdlib.go | 97 + vendor/modules.txt | 70 +- 311 files changed, 75882 insertions(+), 117 deletions(-) create mode 100644 vendor/0xacab.org/leap/obfsvpn/obfsvpn/quic.go create mode 100644 vendor/github.com/go-task/slim-sprig/.editorconfig create mode 100644 vendor/github.com/go-task/slim-sprig/.gitattributes create mode 100644 vendor/github.com/go-task/slim-sprig/.gitignore create mode 100644 vendor/github.com/go-task/slim-sprig/CHANGELOG.md create mode 100644 vendor/github.com/go-task/slim-sprig/LICENSE.txt create mode 100644 vendor/github.com/go-task/slim-sprig/README.md create mode 100644 vendor/github.com/go-task/slim-sprig/Taskfile.yml create mode 100644 vendor/github.com/go-task/slim-sprig/crypto.go create mode 100644 vendor/github.com/go-task/slim-sprig/date.go create mode 100644 vendor/github.com/go-task/slim-sprig/defaults.go create mode 100644 vendor/github.com/go-task/slim-sprig/dict.go create mode 100644 vendor/github.com/go-task/slim-sprig/doc.go create mode 100644 vendor/github.com/go-task/slim-sprig/functions.go create mode 100644 vendor/github.com/go-task/slim-sprig/list.go create mode 100644 vendor/github.com/go-task/slim-sprig/network.go create mode 100644 vendor/github.com/go-task/slim-sprig/numeric.go create mode 100644 vendor/github.com/go-task/slim-sprig/reflect.go create mode 100644 vendor/github.com/go-task/slim-sprig/regex.go create mode 100644 vendor/github.com/go-task/slim-sprig/strings.go create mode 100644 vendor/github.com/go-task/slim-sprig/url.go create mode 100644 vendor/github.com/google/pprof/AUTHORS create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 vendor/github.com/google/pprof/LICENSE create mode 100644 vendor/github.com/google/pprof/profile/encode.go create mode 100644 vendor/github.com/google/pprof/profile/filter.go create mode 100644 vendor/github.com/google/pprof/profile/index.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 vendor/github.com/google/pprof/profile/merge.go create mode 100644 vendor/github.com/google/pprof/profile/profile.go create mode 100644 vendor/github.com/google/pprof/profile/proto.go create mode 100644 
vendor/github.com/google/pprof/profile/prune.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/v2/config/deprecated.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go create mode 100644 
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/config.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/enum_support.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/errors.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/file_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/flags.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/label_filter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/report_entry.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/types.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/version.go create mode 100644 vendor/github.com/quic-go/quic-go/.gitignore create mode 100644 vendor/github.com/quic-go/quic-go/.golangci.yml create mode 100644 vendor/github.com/quic-go/quic-go/Changelog.md create mode 100644 vendor/github.com/quic-go/quic-go/LICENSE create mode 100644 vendor/github.com/quic-go/quic-go/README.md create mode 100644 vendor/github.com/quic-go/quic-go/SECURITY.md create mode 100644 vendor/github.com/quic-go/quic-go/buffer_pool.go create mode 100644 vendor/github.com/quic-go/quic-go/client.go create mode 100644 vendor/github.com/quic-go/quic-go/closed_conn.go create mode 100644 vendor/github.com/quic-go/quic-go/codecov.yml create mode 100644 vendor/github.com/quic-go/quic-go/config.go create mode 100644 vendor/github.com/quic-go/quic-go/conn_id_generator.go create mode 100644 vendor/github.com/quic-go/quic-go/conn_id_manager.go create mode 100644 vendor/github.com/quic-go/quic-go/connection.go create mode 100644 vendor/github.com/quic-go/quic-go/connection_logging.go create mode 100644 vendor/github.com/quic-go/quic-go/connection_timer.go create mode 100644 vendor/github.com/quic-go/quic-go/crypto_stream.go create mode 100644 vendor/github.com/quic-go/quic-go/crypto_stream_manager.go create mode 100644 vendor/github.com/quic-go/quic-go/datagram_queue.go create mode 100644 vendor/github.com/quic-go/quic-go/errors.go create mode 100644 vendor/github.com/quic-go/quic-go/frame_sorter.go create mode 100644 vendor/github.com/quic-go/quic-go/framer.go create mode 100644 vendor/github.com/quic-go/quic-go/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/ecn.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go create mode 100644 
vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/clock.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/aead.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/retry.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/params.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/stream.go create mode 
100644 vendor/github.com/quic-go/quic-go/internal/protocol/version.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/qerr/errors.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/conn.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/log.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/rand.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/ringbuffer/ringbuffer.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/timer.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/header.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/log.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/pool.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/short_header.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go 
create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/connection_tracer.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/frame.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/packet_header.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/tracer.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/types.go create mode 100644 vendor/github.com/quic-go/quic-go/mockgen.go create mode 100644 vendor/github.com/quic-go/quic-go/mtu_discoverer.go create mode 100644 vendor/github.com/quic-go/quic-go/multiplexer.go create mode 100644 vendor/github.com/quic-go/quic-go/oss-fuzz.sh create mode 100644 vendor/github.com/quic-go/quic-go/packet_handler_map.go create mode 100644 vendor/github.com/quic-go/quic-go/packet_packer.go create mode 100644 vendor/github.com/quic-go/quic-go/packet_unpacker.go create mode 100644 vendor/github.com/quic-go/quic-go/quicvarint/io.go create mode 100644 vendor/github.com/quic-go/quic-go/quicvarint/varint.go create mode 100644 vendor/github.com/quic-go/quic-go/receive_stream.go create mode 100644 vendor/github.com/quic-go/quic-go/retransmission_queue.go create mode 100644 vendor/github.com/quic-go/quic-go/send_conn.go create mode 100644 vendor/github.com/quic-go/quic-go/send_queue.go create mode 100644 vendor/github.com/quic-go/quic-go/send_stream.go create mode 100644 vendor/github.com/quic-go/quic-go/server.go create mode 100644 vendor/github.com/quic-go/quic-go/stream.go create mode 100644 vendor/github.com/quic-go/quic-go/streams_map.go create mode 100644 vendor/github.com/quic-go/quic-go/streams_map_incoming.go create mode 100644 vendor/github.com/quic-go/quic-go/streams_map_outgoing.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_buffers.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_buffers_write.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_df.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_helper_nonlinux.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_oob.go create mode 100644 vendor/github.com/quic-go/quic-go/sys_conn_windows.go create mode 100644 vendor/github.com/quic-go/quic-go/token_store.go create mode 100644 vendor/github.com/quic-go/quic-go/tools.go create mode 100644 vendor/github.com/quic-go/quic-go/transport.go create mode 100644 vendor/go.uber.org/mock/AUTHORS create mode 100644 vendor/go.uber.org/mock/LICENSE create mode 100644 vendor/go.uber.org/mock/mockgen/generic_go118.go create mode 100644 vendor/go.uber.org/mock/mockgen/generic_notgo118.go create mode 100644 
vendor/go.uber.org/mock/mockgen/mockgen.go create mode 100644 vendor/go.uber.org/mock/mockgen/model/model.go create mode 100644 vendor/go.uber.org/mock/mockgen/parse.go create mode 100644 vendor/go.uber.org/mock/mockgen/reflect.go create mode 100644 vendor/go.uber.org/mock/mockgen/version.go create mode 100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/rand/exp.go create mode 100644 vendor/golang.org/x/exp/rand/normal.go create mode 100644 vendor/golang.org/x/exp/rand/rand.go create mode 100644 vendor/golang.org/x/exp/rand/rng.go create mode 100644 vendor/golang.org/x/exp/rand/zipf.go create mode 100644 vendor/golang.org/x/mod/LICENSE create mode 100644 vendor/golang.org/x/mod/PATENTS create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go create mode 100644 vendor/golang.org/x/mod/modfile/print.go create mode 100644 vendor/golang.org/x/mod/modfile/read.go create mode 100644 vendor/golang.org/x/mod/modfile/rule.go create mode 100644 vendor/golang.org/x/mod/modfile/work.go create mode 100644 vendor/golang.org/x/mod/module/module.go create mode 100644 vendor/golang.org/x/mod/module/pseudo.go create mode 100644 vendor/golang.org/x/mod/semver/semver.go create mode 100644 vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/inspector.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go create mode 100644 vendor/golang.org/x/tools/imports/forward.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 vendor/golang.org/x/tools/internal/event/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go create mode 100644 vendor/golang.org/x/tools/internal/imports/fix.go create mode 100644 vendor/golang.org/x/tools/internal/imports/imports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod_cache.go create mode 100644 vendor/golang.org/x/tools/internal/imports/sortimports.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/manifest.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/stdlib.go diff --git a/go.mod b/go.mod index 4c8836b3..d9f0b727 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module 0xacab.org/leap/bitmask-vpn go 1.22.2 require ( - 0xacab.org/leap/obfsvpn v1.1.0 + 0xacab.org/leap/obfsvpn 
v1.3.1-0.20241121155258-e6b06efc4456 git.torproject.org/pluggable-transports/goptlib.git v1.3.0 git.torproject.org/pluggable-transports/snowflake.git v1.1.0 github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a @@ -49,6 +49,8 @@ require ( github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/google/pprof v0.0.0-20231212022811-ec68065c825e // indirect github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -62,6 +64,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/onsi/ginkgo/v2 v2.13.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pion/datachannel v1.5.7 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect @@ -80,6 +83,7 @@ require ( github.com/pion/turn/v2 v2.1.6 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/quic-go/quic-go v0.47.0 // indirect github.com/refraction-networking/utls v1.6.7 // indirect github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect github.com/templexxx/cpu v0.1.0 // indirect @@ -95,11 +99,14 @@ require ( go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.uber.org/mock v0.4.0 // indirect golang.org/x/crypto v0.26.0 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/text v0.17.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 14d62c16..915e51ff 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ 0xacab.org/leap/bitmask-core v0.0.0-20241015185856-0812b9aadf98 h1:0tsyOms2hcHOe9V1aJ8fhRpw/DxpswZzCisIvulLamk= 0xacab.org/leap/bitmask-core v0.0.0-20241015185856-0812b9aadf98/go.mod h1:VDzbMRiY/bCOu4z1DwpzzqgQyj6i82TpuvuCU6ogl6Q= -0xacab.org/leap/obfsvpn v1.1.0 h1:mRiV4jzmQOeRVdhbUQbNdT/aj3rmFqhL5GsK8hIol2E= -0xacab.org/leap/obfsvpn v1.1.0/go.mod h1:o9Jj+5lBfDO/HjJvO7sjnOjziKtwLnbbxMSXrhwupGA= +0xacab.org/leap/obfsvpn v1.3.1-0.20241121155258-e6b06efc4456 h1:vPw0Uof9ArxiwPL/mlVPHDY3lCPFCEJzM4V5c6bdCYY= +0xacab.org/leap/obfsvpn v1.3.1-0.20241121155258-e6b06efc4456/go.mod h1:2Nb7wDMNovIkwXOOBcb6/ocaRj9djAoPYM3NsGhu8JA= 0xacab.org/leap/tunnel-telemetry v0.0.0-20240830081933-7328bb50078b h1:GI6SVhECFVdHalARYd5Qt/i5/+M0fjcLtJIEEQVO4Sk= 0xacab.org/leap/tunnel-telemetry v0.0.0-20240830081933-7328bb50078b/go.mod h1:Oy9wDiKAy+hq+V04b7CM+gbj9wOrIcX+OZKMVjrdBCs= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -127,6 +127,8 @@ github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3Bum github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-sql-driver/mysql v1.4.0/go.mod 
h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -163,6 +165,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20231212022811-ec68065c825e h1:bwOy7hAFd0C91URzMIEBfr6BAz29yk7Qj0cy6S7DJlU= +github.com/google/pprof v0.0.0-20231212022811-ec68065c825e/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -294,10 +298,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= +github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -415,6 +423,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/quic-go/quic-go v0.47.0 h1:yXs3v7r2bm1wmPTYNLKAAJTHMYkPEsfYJmTazXrCZ7Y= +github.com/quic-go/quic-go v0.47.0/go.mod h1:3bCapYsJvXGZcipOHuu7plYtaV6tnF+z7wIFsU0WK9E= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 
github.com/refraction-networking/utls v1.6.7 h1:zVJ7sP1dJx/WtVuITug3qYUq034cDq9B2MR1K67ULZM= github.com/refraction-networking/utls v1.6.7/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0= @@ -522,6 +532,8 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -551,8 +563,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -564,6 +576,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -690,6 +704,8 @@ golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -708,6 +724,8 @@ golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200808161706-5bf02b21f123/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/0xacab.org/leap/obfsvpn/client/client.go b/vendor/0xacab.org/leap/obfsvpn/client/client.go index ad9745fb..9c920207 100644 --- a/vendor/0xacab.org/leap/obfsvpn/client/client.go +++ b/vendor/0xacab.org/leap/obfsvpn/client/client.go @@ -38,6 +38,7 @@ type EventLogger interface { const ( dialGiveUpTime = 15 * time.Second + reconnectTime = 3 * time.Second ) type Obfs4Config struct { @@ -45,17 +46,30 @@ type Obfs4Config struct { Cert string } +type Obfs4Conn struct { + net.Conn + config Obfs4Config +} + +func NewObfs4Conn(conn net.Conn, config Obfs4Config) *Obfs4Conn { + return &Obfs4Conn{ + Conn: conn, + config: config, + } +} + func (oc *Obfs4Config) String() string { return oc.Remote } type Config struct { - ProxyAddr string `json:"proxy_addr"` - HoppingConfig HoppingConfig `json:"hopping_config"` - KCPConfig obfsvpn.KCPConfig `json:"kcp_config"` - RemoteIP string `json:"remote_ip"` - RemotePort string `json:"remote_port"` - Obfs4Cert string `json:"obfs4_cert"` + ProxyAddr string `json:"proxy_addr"` + HoppingConfig HoppingConfig `json:"hopping_config"` + KCPConfig obfsvpn.KCPConfig `json:"kcp_config"` + QUICConfig obfsvpn.QUICConfig `json:"quic_config"` + RemoteIP string `json:"remote_ip"` + RemotePort string `json:"remote_port"` + Obfs4Cert string `json:"obfs4_cert"` } type HoppingConfig struct { @@ -64,17 +78,19 @@ type HoppingConfig struct { Obfs4Certs []string `json:"obfs4_certs"` PortSeed int64 `json:"port_seed"` PortCount uint `json:"port_count"` + MinHopPort uint `json:"min_hop_port"` + MaxHopPort uint `json:"max_hop_port"` MinHopSeconds uint `json:"min_hop_seconds"` HopJitter uint `json:"hop_jitter"` } type Client struct { kcpConfig obfsvpn.KCPConfig + quicConfig obfsvpn.QUICConfig ProxyAddr string - newObfs4Conn chan net.Conn - obfs4Conns []net.Conn + newObfs4Conn chan Obfs4Conn + obfs4Conns []Obfs4Conn obfs4Endpoints []*Obfs4Config - obfs4Dialer *obfsvpn.Dialer obfs4Failures map[string]int32 EventLogger EventLogger state clientState @@ -98,9 +114,10 @@ func NewClient(ctx context.Context, stop context.CancelFunc, config Config) *Cli ctx: ctx, hopJitter: config.HoppingConfig.HopJitter, kcpConfig: config.KCPConfig, + quicConfig: config.QUICConfig, obfs4Failures: map[string]int32{}, minHopSeconds: config.HoppingConfig.MinHopSeconds, - newObfs4Conn: make(chan net.Conn), + newObfs4Conn: make(chan 
Obfs4Conn), obfs4Endpoints: obfs4Endpoints, stop: stop, state: stopped, @@ -124,13 +141,14 @@ func generateObfs4Config(config Config) []*Obfs4Config { obfsEndpoints := []*Obfs4Config{} if config.HoppingConfig.Enabled { + portHopRange := int(config.HoppingConfig.MaxHopPort - config.HoppingConfig.MinHopPort) for i, obfs4Remote := range config.HoppingConfig.Remotes { // We want a non-crypto RNG so that we can share a seed // #nosec G404 r := rand.New(rand.NewSource(config.HoppingConfig.PortSeed)) for pi := 0; pi < int(config.HoppingConfig.PortCount); pi++ { - portOffset := r.Intn(obfsvpn.PortHopRange) - addr := net.JoinHostPort(obfs4Remote, fmt.Sprint(portOffset+obfsvpn.MinHopPort)) + portOffset := r.Intn(portHopRange) + addr := net.JoinHostPort(obfs4Remote, fmt.Sprint(portOffset+int(config.HoppingConfig.MinHopPort))) obfsEndpoints = append(obfsEndpoints, &Obfs4Config{ Cert: config.HoppingConfig.Obfs4Certs[i], Remote: addr, @@ -177,34 +195,18 @@ func (c *Client) Start() (bool, error) { c.updateState(starting) obfs4Endpoint := c.obfs4Endpoints[0] - - c.obfs4Dialer, err = obfsvpn.NewDialerFromCert(obfs4Endpoint.Cert) - if err != nil { - return false, fmt.Errorf("could not dial obfs4 remote: %w", err) - } - - if c.kcpConfig.Enabled { - c.obfs4Dialer.DialFunc = obfsvpn.GetKCPDialer(c.kcpConfig, c.log) - } - - obfs4Conn, err := c.obfs4Dialer.Dial("tcp", obfs4Endpoint.Remote) + obfs4Conn, err := c.createObfs4Connection(obfs4Endpoint) if err != nil { - c.error("Could not dial obfs4 remote: %v", err) + c.error("Could not createObfs4Connection: %v", err) return false, fmt.Errorf("could not dial remote: %w", err) } - - c.obfs4Conns = []net.Conn{obfs4Conn} + c.obfs4Conns = []Obfs4Conn{*obfs4Conn} c.updateState(running) - proxyAddr, err := net.ResolveUDPAddr("udp", c.ProxyAddr) - if err != nil { - return false, fmt.Errorf("cannot resolve UDP addr: %w", err) - } - - c.openvpnConn, err = net.ListenUDP("udp", proxyAddr) + c.openvpnConn, err = c.createOpenvpnConnection() if err != nil { - return false, fmt.Errorf("error accepting udp connection: %w", err) + return false, err } if c.hopEnabled { @@ -222,6 +224,32 @@ func (c *Client) Start() (bool, error) { return true, nil } +func (c *Client) createObfs4Connection(obfs4Endpoint *Obfs4Config) (*Obfs4Conn, error) { + var err error + + obfs4Dialer, err := obfsvpn.NewDialerFromCert(obfs4Endpoint.Cert) + if err != nil { + c.error("Could not get obfs4Dialer from cert: %v", err) + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), dialGiveUpTime) + defer cancel() + + if c.kcpConfig.Enabled { + obfs4Dialer.DialFunc = obfsvpn.GetKCPDialer(c.kcpConfig, c.log) + } else if c.quicConfig.Enabled { + obfs4Dialer.DialFunc = obfsvpn.GetQUICDialer(ctx, c.quicConfig, c.log) + } + + c.log("Dialing remote: %v", obfs4Endpoint.Remote) + conn, err := obfs4Dialer.DialContext(ctx, "tcp", obfs4Endpoint.Remote) + if err != nil { + return nil, fmt.Errorf("error in obfs4Dialer.DialContext: %w", err) + } + return NewObfs4Conn(conn, *obfs4Endpoint), nil +} + // updateState sets a new client state, logs it and sends an event to the clients // EventLogger in case it is available. Always set the state with this function in // order to ensure integrating clients receive an update state event via FFI. 
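// A minimal sketch of the seeded port derivation that generateObfs4Config
// performs above: both peers seed a non-crypto RNG with the shared PortSeed,
// so they compute identical hop-port sequences independently, using the new
// per-config MinHopPort/MaxHopPort bounds instead of the removed package-level
// obfsvpn.PortHopRange. The seed, remote IP, and bounds below are illustrative
// (the bounds match the defaults in obfsvpn/hop.go), not values from this patch.
package main

import (
	"fmt"
	"math/rand"
	"net"
)

func main() {
	const (
		portSeed   int64 = 42 // shared seed, illustrative
		portCount        = 4
		minHopPort       = 49152
		maxHopPort       = 65535
	)

	// Non-crypto RNG on purpose, so both sides can share the seed (#nosec G404).
	r := rand.New(rand.NewSource(portSeed))
	for i := 0; i < portCount; i++ {
		portOffset := r.Intn(maxHopPort - minHopPort)
		fmt.Println(net.JoinHostPort("203.0.113.1", fmt.Sprint(minHopPort+portOffset)))
	}
}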
@@ -244,6 +272,40 @@ func (c *Client) pickRandomEndpoint() *Obfs4Config { return endpoint } +func (c *Client) connectObfs4(obfs4Endpoint *Obfs4Config, sleepSeconds int) { + newObfs4Conn, err := c.createObfs4Connection(obfs4Endpoint) + + if err != nil { + newRemote := obfs4Endpoint.Remote + _, ok := c.obfs4Failures[newRemote] + if ok { + c.obfs4Failures[newRemote] += 1 + } else { + c.obfs4Failures[newRemote] = 1 + } + c.error("Could not dial obfs4 remote: %v (failures: %d)", err, c.obfs4Failures[newRemote]) + } + + if newObfs4Conn == nil { + c.error("Did not get obfs4: %v ", err) + } else { + c.outLock.Lock() + c.obfs4Conns = append([]Obfs4Conn{*newObfs4Conn}, c.obfs4Conns...) + c.outLock.Unlock() + + c.newObfs4Conn <- *newObfs4Conn + c.log("Dialed new remote") + + // If we wait sleepSeconds here to clean up the previous connection, we can guarantee that the + // connection list will not grow unbounded + go func() { + time.Sleep(time.Duration(sleepSeconds) * time.Second) + + c.cleanupOldConn() + }() + } +} + func (c *Client) hop() { for { select { @@ -285,51 +347,8 @@ func (c *Client) hop() { } c.log("HOPPING to %+v", newRemote) + c.connectObfs4(obfs4Endpoint, sleepSeconds) - obfs4Dialer, err := obfsvpn.NewDialerFromCert(obfs4Endpoint.Cert) - if err != nil { - c.error("Could not dial obfs4 remote: %v", err) - return - } - - if c.kcpConfig.Enabled { - c.obfs4Dialer.DialFunc = obfsvpn.GetKCPDialer(c.kcpConfig, c.log) - } - - ctx, cancel := context.WithTimeout(context.Background(), dialGiveUpTime) - defer cancel() - - c.log("Dialing new remote: %v", newRemote) - newObfs4Conn, err := obfs4Dialer.DialContext(ctx, "tcp", newRemote) - - if err != nil { - _, ok := c.obfs4Failures[newRemote] - if ok { - c.obfs4Failures[newRemote] += 1 - } else { - c.obfs4Failures[newRemote] = 1 - } - c.error("Could not dial obfs4 remote: %v (failures: %d)", err, c.obfs4Failures[newRemote]) - } - - if newObfs4Conn == nil { - c.error("Did not get obfs4: %v ", err) - } else { - c.outLock.Lock() - c.obfs4Conns = append([]net.Conn{newObfs4Conn}, c.obfs4Conns...) 
- c.outLock.Unlock() - - c.newObfs4Conn <- newObfs4Conn - c.log("Dialed new remote") - - // If we wait sleepSeconds here to clean up the previous connection, we can guarantee that the - // connection list will not grow unbounded - go func() { - time.Sleep(time.Duration(sleepSeconds) * time.Second) - - c.cleanupOldConn() - }() - } } } @@ -340,13 +359,11 @@ func (c *Client) cleanupOldConn() { if len(c.obfs4Conns) > 1 { c.log("Connections: %v", len(c.obfs4Conns)) connToClose := c.obfs4Conns[len(c.obfs4Conns)-1] - if connToClose != nil { - c.log("Cleaning up old connection to %v", connToClose.RemoteAddr()) + c.log("Cleaning up old connection to %v", connToClose.RemoteAddr()) - err := connToClose.Close() - if err != nil { - c.log("Error closing obfs4 connection to %v: %v", connToClose.RemoteAddr(), err) - } + err := connToClose.Close() + if err != nil { + c.log("Error closing obfs4 connection to %v: %v", connToClose.RemoteAddr(), err) } // Remove the connection from our tracking list @@ -375,21 +392,19 @@ func (c *Client) readUDPWriteTCP() { c.openvpnAddrLock.Unlock() } - func() { - // Always write to the first connection in our list because it will be most up to date - func() { - conn, err := c.getUsableConnection() - if err != nil { - c.log("Cannot get connection: %s", err) - return - } - _, err = conn.Write(tcpBuffer) - if err != nil { - c.log("Write err from %v to %v: %v", conn.LocalAddr(), conn.RemoteAddr(), err) - return - } - }() - }() + // Always write to the first connection in our list because it will be most up to date + conn, err := c.getUsableConnection() + if err != nil { + c.error("Cannot get connection: %s", err) + continue + } + _, err = conn.Write(tcpBuffer) + if err != nil { + c.error("readUDPWriteTCP: Write err from %v to %v: %v", conn.LocalAddr(), conn.RemoteAddr(), err) + time.Sleep(reconnectTime) + config := c.obfs4Conns[0].config + c.connectObfs4(&config, 20) + } } } @@ -440,18 +455,43 @@ func (c *Client) readTCPWriteUDP() { for { tcpBytes := <-fromTCP - c.openvpnAddrLock.RLock() _, err := c.openvpnConn.WriteToUDP(tcpBytes, c.openvpnAddr) c.openvpnAddrLock.RUnlock() - if err != nil { - c.error("Write err from %v to %v: %v", c.openvpnConn.LocalAddr(), c.openvpnConn.RemoteAddr(), err) - return + if err != nil && !errors.Is(err, net.ErrClosed) { + c.error("readTCPWriteUDP: Write err from %v to %v: %v", c.openvpnConn.LocalAddr(), c.openvpnConn.RemoteAddr(), err) + c.openvpnAddrLock.Lock() + c.openvpnConn.Close() + c.openvpnConn, err = c.createOpenvpnConnection() + c.openvpnAddrLock.Unlock() + if err == nil { + c.openvpnAddrLock.RLock() + _, err := c.openvpnConn.WriteToUDP(tcpBytes, c.openvpnAddr) + c.openvpnAddrLock.RUnlock() + if err != nil { + c.error("Failed to resend. 
%v", err) + } + } } } } } +func (c *Client) createOpenvpnConnection() (*net.UDPConn, error) { + proxyAddr, err := net.ResolveUDPAddr("udp", c.ProxyAddr) + if err != nil { + c.error("cannot resolve UDP addr: %v", err) + return nil, err + } + + udpConn, err := net.ListenUDP("udp", proxyAddr) + if err != nil { + c.error("error accepting udp connection: %v", err) + return nil, fmt.Errorf("error accepting udp connection: %w", err) + } + return udpConn, nil +} + func (c *Client) Stop() (bool, error) { c.mux.Lock() defer c.mux.Unlock() diff --git a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/dialer.go b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/dialer.go index 71e2d0ac..7a6be08d 100644 --- a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/dialer.go +++ b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/dialer.go @@ -134,7 +134,7 @@ func (d *Dialer) Dial(network, address string) (net.Conn, error) { return d.dial(ctx, network, address, func(network, address string) (net.Conn, error) { conn, err := d.Dialer.DialContext(ctx, network, address) if err != nil { - return nil, fmt.Errorf("%w: %s", ErrCannotDial, err) + return nil, fmt.Errorf("%s: %w", ErrCannotDial, err) } return conn.(*net.TCPConn), err }) diff --git a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/hop.go b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/hop.go index 2404e2d7..6a9179e5 100644 --- a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/hop.go +++ b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/hop.go @@ -1,8 +1,6 @@ package obfsvpn -const MinHopPort = 49152 -const MaxHopPort = 65535 - -var PortHopRange = MaxHopPort - MinHopPort +const MinHopPort uint = 49152 +const MaxHopPort uint = 65535 const MaxUDPLen uint = 65507 diff --git a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/listener.go b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/listener.go index a899d65d..4cf81fa1 100644 --- a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/listener.go +++ b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/listener.go @@ -4,10 +4,12 @@ import ( "bytes" "context" "crypto/rand" + "crypto/tls" "encoding/hex" "log" "net" + "github.com/quic-go/quic-go" "github.com/xtaci/kcp-go/v5" "gitlab.com/yawning/obfs4.git/common/ntor" "gitlab.com/yawning/obfs4.git/transports/base" @@ -21,6 +23,7 @@ import ( type ListenConfig struct { ListenConfig net.ListenConfig KCPConfig KCPConfig + QUICConfig QUICConfig NodeID *ntor.NodeID PrivateKey *ntor.PrivateKey @@ -31,7 +34,7 @@ type ListenConfig struct { // NewListenConfig returns a ListenConfig and any error during the initialization. // perhaps this is redundant, but using the same json format than ss for debug. 
-func NewListenConfig(nodeIDStr, privKeyStr, pubKeyStr, seedStr, stateDir string, kcpConfig KCPConfig) (*ListenConfig, error) { +func NewListenConfig(nodeIDStr, privKeyStr, pubKeyStr, seedStr, stateDir string, kcpConfig KCPConfig, quicConfig QUICConfig) (*ListenConfig, error) { var err error var seed [ntor.KeySeedLength]byte var nodeID *ntor.NodeID @@ -60,6 +63,7 @@ func NewListenConfig(nodeIDStr, privKeyStr, pubKeyStr, seedStr, stateDir string, Seed: seed, StateDir: stateDir, KCPConfig: kcpConfig, + QUICConfig: quicConfig, } return lc, nil } @@ -138,6 +142,33 @@ func (lc *ListenConfig) Listen(ctx context.Context, address string) (*Listener, } wrappedListener.kcpConfig = lc.KCPConfig return wrappedListener, nil + } else if lc.QUICConfig.Enabled { + log.Println("quic listen on", address) + tlsConf := &tls.Config{ + Certificates: []tls.Certificate{*lc.QUICConfig.TLSCert}, + NextProtos: []string{}, + MinVersion: tls.VersionTLS12, + } + + quicConf := &quic.Config{} + udpAddr, err := net.ResolveUDPAddr("udp", address) + if err != nil { + return nil, err + } + + udpConn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return nil, err + } + ql, err := quic.Listen(udpConn, tlsConf, quicConf) + if err != nil { + return nil, err + } + ln := QUICListener{ + ql: ql, + ctx: ctx, + } + return lc.Wrap(ctx, &ln) } ln, err := lc.ListenConfig.Listen(ctx, "tcp", address) if err != nil { diff --git a/vendor/0xacab.org/leap/obfsvpn/obfsvpn/quic.go b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/quic.go new file mode 100644 index 00000000..ba37da92 --- /dev/null +++ b/vendor/0xacab.org/leap/obfsvpn/obfsvpn/quic.go @@ -0,0 +1,92 @@ +package obfsvpn + +import ( + "context" + "crypto/tls" + "net" + + "github.com/quic-go/quic-go" +) + +type QUICConfig struct { + Enabled bool + TLSCert *tls.Certificate +} + +type QUICConn struct { + quic.Stream + conn quic.Connection +} + +func (q *QUICConn) Close() error { + return q.conn.CloseWithError(0, "closing obfs4 connection") +} + +func (q *QUICConn) LocalAddr() net.Addr { + return q.conn.LocalAddr() +} + +func (q *QUICConn) RemoteAddr() net.Addr { + return q.conn.RemoteAddr() +} + +type QUICListener struct { + ql *quic.Listener + ctx context.Context +} + +func (q *QUICListener) Accept() (net.Conn, error) { + conn, err := q.ql.Accept(q.ctx) + if err != nil { + return nil, err + } + + str, err := conn.AcceptStream(q.ctx) + if err != nil { + return nil, err + } + + return &QUICConn{str, conn}, nil +} + +func (q *QUICListener) Close() error { + return q.ql.Close() +} + +// Addr returns the listener's network address. 
+func (q *QUICListener) Addr() net.Addr { + return q.ql.Addr() +} + +func DefaultQUICConfig() *QUICConfig { + return &QUICConfig{ + Enabled: true, + } +} + +func GetQUICDialer(ctx context.Context, quicConfig QUICConfig, logger func(format string, a ...interface{})) func(network, address string) (net.Conn, error) { + quicConf := &quic.Config{} + return func(network, address string) (net.Conn, error) { + tlsConf := &tls.Config{ + //nolint:gosec // We can skip actual TLS verification because we're only using this layer for obfuscation + InsecureSkipVerify: true, + MinVersion: tls.VersionTLS12, + } + + if logger != nil { + logger("Dialing quic://%s", address) + } + + conn, err := quic.DialAddr(ctx, address, tlsConf, quicConf) + if err != nil { + return nil, err + } + + str, err := conn.OpenStreamSync(ctx) + if err != nil { + return nil, err + } + + return &QUICConn{str, conn}, nil + } +} diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/.editorconfig new file mode 100644 index 00000000..b0c95367 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/.gitattributes new file mode 100644 index 00000000..176a458f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/.gitignore new file mode 100644 index 00000000..5e3002f8 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md new file mode 100644 index 00000000..61d8ebff --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -0,0 +1,364 @@ +# Changelog + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. 
+ +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. 
+ +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. + +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. 
Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. + +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. 
+ +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. + +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/LICENSE.txt new file mode 100644 index 00000000..f311b1ea --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/README.md new file mode 100644 index 00000000..72579471 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. 
+The reason for this is to make this library more lightweight. Most of these +functions (especially the crypto ones) are not needed in most apps, but cost a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/Taskfile.yml new file mode 100644 index 00000000..cdcfd223 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '2' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v .
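The README's pipeline example above can be exercised end to end with `text/template`. A small runnable sketch, assuming only the vendored module path and the `TxtFuncMap` constructor defined later in this patch (functions.go):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// TxtFuncMap is the text/template flavor of the FuncMap the README loads.
	tpl := template.Must(template.New("demo").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ "hello!" | upper | repeat 5 }}`))

	// Prints HELLO!HELLO!HELLO!HELLO!HELLO!
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```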
diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go new file mode 100644 index 00000000..d06e516d --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/date.go new file mode 100644 index 00000000..ed022dda --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return 
strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/defaults.go new file mode 100644 index 00000000..b9f97966 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. 
+func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/dict.go new file mode 100644 index 00000000..77ebc61b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range 
dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/doc.go new file mode 100644 index 00000000..aabb9d44 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/functions.go new file mode 100644 index 00000000..5ea74f89 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. 
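+// Concretely: "now" and the other date helpers read the wall clock, the
+// rand*/uuidv4 string generators draw on randomness, and "env" and
+// "getHostByName" consult the process environment and DNS respectively.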
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
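+	// add/add1/sub/div/mod/mul (and max/min) coerce their operands through
+	// toInt64 in numeric.go; the *f variants use toFloat64 instead.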
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/list.go new file mode 100644 index 00000000..ca0fbb78 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) 
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v 
interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) + if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/network.go new file mode 100644 index 00000000..108d78a9 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/numeric.go new file mode 100644 index 00000000..98cbb37a --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? 
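+		// Clamping to math.MaxInt64 at least avoids wrapping a large
+		// uint64 into a negative int64.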
+		return math.MaxInt64
+	case reflect.Float32, reflect.Float64:
+		return int64(val.Float())
+	case reflect.Bool:
+		if val.Bool() {
+			return 1
+		}
+		return 0
+	default:
+		return 0
+	}
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb > aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Max(aa, bb)
+	}
+	return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb < aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Min(aa, bb)
+	}
+	return aa
+}
+
+func until(count int) []int {
+	step := 1
+	if count < 0 {
+		step = -1
+	}
+	return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+	v := []int{}
+
+	if stop < start {
+		if step >= 0 {
+			return v
+		}
+		for i := start; i > stop; i += step {
+			v = append(v, i)
+		}
+		return v
+	}
+
+	if step <= 0 {
+		return v
+	}
+	for i := start; i < stop; i += step {
+		v = append(v, i)
+	}
+	return v
+}
+
+func floor(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+	roundOn := .5
+	if len(rOpt) > 0 {
+		roundOn = rOpt[0]
+	}
+	val := toFloat64(a)
+	places := toFloat64(p)
+
+	var round float64
+	pow := math.Pow(10, places)
+	digit := pow * val
+	_, div := math.Modf(digit)
+	if div >= roundOn {
+		round = math.Ceil(digit)
+	} else {
+		round = math.Floor(digit)
+	}
+	return round / pow
+}
+
+// toDecimal parses a unix octal string (e.g. file permissions) and
+// returns its decimal value, or 0 if it cannot be parsed.
+func toDecimal(v interface{}) int64 {
+	result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+	if err != nil {
+		return 0
+	}
+	return result
+}
+
+func seq(params ...int) string {
+	increment := 1
+	switch len(params) {
+	case 0:
+		return ""
+	case 1:
+		start := 1
+		end := params[0]
+		if end < start {
+			increment = -1
+		}
+		return intArrayToString(untilStep(start, end+increment, increment), " ")
+	case 3:
+		start := params[0]
+		end := params[2]
+		step := params[1]
+		if end < start {
+			increment = -1
+			if step > 0 {
+				return ""
+			}
+		}
+		return intArrayToString(untilStep(start, end+increment, step), " ")
+	case 2:
+		start := params[0]
+		end := params[1]
+		step := 1
+		if end < start {
+			step = -1
+		}
+		return intArrayToString(untilStep(start, end+step, step), " ")
+	default:
+		return ""
+	}
+}
+
+func intArrayToString(slice []int, delimiter string) string {
+	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]")
+}
diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/reflect.go
new file mode 100644
index 00000000..8a65c132
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// typeIs returns true if the src is the type named in target.
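+// For example, in a template with a hypothetical pipeline value .When of
+// type time.Time: {{ if typeIs "time.Time" .When }}...{{ end }}.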
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go new file mode 100644 index 00000000..fab55101 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/strings.go new file mode 100644 index 00000000..3c62d6b6 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str 
...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
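+// For example, substring 6 11 "hello world" yields "world", and
+// substring 0 -1 "hello world" returns the whole string.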
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/url.go new file mode 100644 index 00000000..b8e120e1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 00000000..fd736cb1 --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 00000000..8c8c37d2 --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 00000000..182c926b --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,588 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. 
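+// All referenced strings are interned into the profile's string table;
+// index 0 always holds the empty string, as the decoder requires.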
+func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) 
+	},
+	// repeated Location location = 4
+	func(b *buffer, m message) error {
+		x := new(Location)
+		x.Line = b.tmpLines[:0] // Use shared space temporarily
+		pp := m.(*Profile)
+		pp.Location = append(pp.Location, x)
+		err := decodeMessage(b, x)
+		b.tmpLines = x.Line[:0]
+		// Copy to shrink size and detach from shared space.
+		x.Line = append([]Line(nil), x.Line...)
+		return err
+	},
+	// repeated Function function = 5
+	func(b *buffer, m message) error {
+		x := new(Function)
+		pp := m.(*Profile)
+		pp.Function = append(pp.Function, x)
+		return decodeMessage(b, x)
+	},
+	// repeated string string_table = 6
+	func(b *buffer, m message) error {
+		err := decodeStrings(b, &m.(*Profile).stringTable)
+		if err != nil {
+			return err
+		}
+		if m.(*Profile).stringTable[0] != "" {
+			return errors.New("string_table[0] must be ''")
+		}
+		return nil
+	},
+	// int64 drop_frames = 7
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+	// int64 keep_frames = 8
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+	// int64 time_nanos = 9
+	func(b *buffer, m message) error {
+		if m.(*Profile).TimeNanos != 0 {
+			return errConcatProfile
+		}
+		return decodeInt64(b, &m.(*Profile).TimeNanos)
+	},
+	// int64 duration_nanos = 10
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+	// ValueType period_type = 11
+	func(b *buffer, m message) error {
+		x := new(ValueType)
+		pp := m.(*Profile)
+		pp.PeriodType = x
+		return decodeMessage(b, x)
+	},
+	// int64 period = 12
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+	// repeated int64 comment = 13
+	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
+	// int64 defaultSampleType = 14
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+}
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared up to facilitate testing.
+func (p *Profile) postDecode() error {
+	var err error
+	mappings := make(map[uint64]*Mapping, len(p.Mapping))
+	mappingIds := make([]*Mapping, len(p.Mapping)+1)
+	for _, m := range p.Mapping {
+		m.File, err = getString(p.stringTable, &m.fileX, err)
+		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+		if m.ID < uint64(len(mappingIds)) {
+			mappingIds[m.ID] = m
+		} else {
+			mappings[m.ID] = m
+		}
+
+		// If this is a main Linux kernel mapping with a relocation symbol suffix
+		// ("[kernel.kallsyms]_text"), extract said suffix.
+		// It is fairly hacky to handle at this level, but the alternatives appear even worse.
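+		// For example, a File of "[kernel.kallsyms]_text" yields a
+		// KernelRelocationSymbol of "_text".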
+ const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + } + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr with enough 
empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool 
has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 00000000..c794b939 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,274 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	if focus == nil && ignore == nil && hide == nil && show == nil {
+		fm = true // Missing focus implies a match
+		return
+	}
+	focusOrIgnore := make(map[uint64]bool)
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			if showFromLocs[sample.Location[i].ID] {
+				sample.Location = sample.Location[:i+1]
+				s = append(s, sample)
+				break
+			}
+		}
+	}
+	p.Sample = s
+	return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
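+// For example, with lines [a, b, c] where only b matches, the location's
+// lines are truncated to [a, b].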
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. 
+ return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 00000000..bef1d604 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 00000000..91f45e53 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
+				return b, nil
+			}
+
+			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+			var err error
+			switch pType + "/" + attribute {
+			case "heap/format", "cpu/format", "contention/format":
+				if value != "java" {
+					return nil, errUnrecognized
+				}
+			case "heap/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "inuse_objects", Unit: "count"},
+					{Type: "inuse_space", Unit: value},
+				}
+			case "contention/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "contentions", Unit: "count"},
+					{Type: "delay", Unit: value},
+				}
+			case "contention/sampling period":
+				p.PeriodType = &ValueType{
+					Type: "contentions", Unit: "count",
+				}
+				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+			case "contention/ms since reset":
+				millis, err := strconv.ParseInt(value, 0, 64)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+				p.DurationNanos = millis * 1000 * 1000
+			default:
+				return nil, errUnrecognized
+			}
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	locs := make(map[uint64]*Location)
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			sample := javaSampleRx.FindStringSubmatch(line)
+			if sample == nil {
+				// Not a valid sample, exit.
+				return b, locs, nil
+			}
+
+			// Java profiles have data/fields inverted compared to other
+			// profile types.
+			var err error
+			value1, value2, value3 := sample[2], sample[1], sample[3]
+			addrs, err := parseHexAddresses(value3)
+			if err != nil {
+				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+			}
+
+			var sloc []*Location
+			for _, addr := range addrs {
+				loc := locs[addr]
+				if loc == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			s := &Sample{
+				Value:    make([]int64, 2),
+				Location: sloc,
+			}
+
+			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+
+			switch pType {
+			case "heap":
+				const javaHeapzSamplingRate = 524288 // 512K
+				if s.Value[0] == 0 {
+					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+				}
+				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+			case "contention":
+				if period := p.Period; period != 0 {
+					s.Value[0] = s.Value[0] * p.Period
+					s.Value[1] = s.Value[1] * p.Period
+				}
+			}
+			p.Sample = append(p.Sample, s)
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each location.
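+// A hypothetical input line: "0x4f2a com.example.Foo.bar (Foo.java:42)";
+// the hex address keys into locs, and the remainder splits into function,
+// file, and line number.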
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". + lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 00000000..8d07fd6c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
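+//
+// As a rough illustration, the text formats recognized below begin with
+// headers such as these (hypothetical counts):
+//
+//   goroutine profile: total 42                      (Go count profiles)
+//   heap profile: 1: 8 [ 2: 16 ] @ heap_v2/524288    (heapz)
+//   --- contentionz 1 ---                            (contention)
+//   --- threadz 1 ---                                (threadz)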
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... + logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. + if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). +func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. 
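+//
+// A heapz v2 input, for example, looks roughly like this (hypothetical
+// counts and addresses):
+//
+//   heap profile: 1: 8 [ 2: 16 ] @ heap_v2/524288
+//   1: 8 [ 2: 16 ] @ 0x40001000 0x40002000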
+func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. 
+ addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). +func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
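+//
+// A legacy contentionz body, for example, consists of "attribute = value"
+// lines followed by samples (hypothetical values):
+//
+//   --- contentionz 1 ---
+//   cycles/second = 2000000000
+//   sampling period = 1
+//   5 100 @ 0x40001000 0x40002000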
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. + return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
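+ // For example (hypothetical numbers): with period = 10 and
+ // cpuHz = 2e9 (so cpuGHz = 2), a delay of v1 = 1000 cycles becomes
+ // 1000 * 10 / 2 = 5000 nanoseconds, and a contention count of
+ // v2 = 5 becomes 5 * 10 = 50.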
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. 
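+//
+// The trailing section typically begins at a memory map sentinel, e.g.
+// (hypothetical mapping):
+//
+//   --- Memory map: ---
+//   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so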
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+ for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+ }
+ if err := s.Err(); err != nil {
+ return err
+ }
+ return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// ParseMemoryMap should be called on a profile to associate its
+// locations with the corresponding mappings based on their addresses.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+ s := bufio.NewScanner(rd)
+ return parseProcMapsFromScanner(s)
+}
+
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+ var mapping []*Mapping
+
+ var attrs []string
+ const delimiter = "="
+ r := strings.NewReplacer()
+ for s.Scan() {
+ line := r.Replace(removeLoggingInfo(s.Text()))
+ m, err := parseMappingEntry(line)
+ if err != nil {
+ if err == errUnrecognized {
+ // Recognize assignments of the form: attr=value, and replace
+ // $attr with value on subsequent mappings.
+ if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+ attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+ r = strings.NewReplacer(attrs...)
+ }
+ // Ignore any unrecognized entries.
+ continue
+ }
+ return nil, err
+ }
+ if m == nil {
+ continue
+ }
+ mapping = append(mapping, m)
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+ if match := logInfoRE.FindStringIndex(line); match != nil {
+ return line[match[1]:]
+ }
+ return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+ return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or a variety of legacy formats, and overrides the
+// mappings in the current profile. It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+ mapping, err := parseProcMapsFromScanner(s)
+ if err != nil {
+ return err
+ }
+ p.Mapping = append(p.Mapping, mapping...)
+ p.massageMappings()
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+ return nil
+}
+
+func parseMappingEntry(l string) (*Mapping, error) {
+ var start, end, perm, file, offset, buildID string
+ if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+ start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+ } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+ start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+ } else {
+ return nil, errUnrecognized
+ }
+
+ var err error
+ mapping := &Mapping{
+ File: file,
+ BuildID: buildID,
+ }
+ if perm != "" && !strings.Contains(perm, "x") {
+ // Skip non-executable entries.
+ return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. 
+ `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 00000000..4b66282c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,667 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "encoding/binary" + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
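+//
+// A minimal usage sketch (illustrative; p1 and p2 are assumed to be
+// already-parsed profiles with identical sample and period types):
+//
+//   merged, err := Merge([]*Profile{p1, p2})
+//   if err != nil {
+//   	// handle incompatible or empty inputs
+//   }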
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID locationIDMap + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
+ s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } + } + putNumber(0) // Delimiter + + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } + } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. 
+func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. 
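+//
+// For example (illustrative), given one profile with sample types
+// [alloc_objects, alloc_space, inuse_objects, inuse_space] and another
+// with [inuse_objects, inuse_space], both are reduced to
+// [inuse_objects, inuse_space], keeping the first profile's order.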
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// commonSampleTypes returns sample types that appear in all profiles, in
+// the order in which they appear in the first profile.
+func commonSampleTypes(ps []*Profile) []string {
+ if len(ps) == 0 {
+ return nil
+ }
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
+// compatibilizeSampleTypes drops sample types that are not present in the
+// sTypes list and reorders them if needed.
+//
+// It sets DefaultSampleType to sTypes[0] if it is not in the sTypes list.
+//
+// It assumes that all sample types from the sTypes list are present in the
+// given profile; otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
+ if len(sTypes) == 0 {
+ return fmt.Errorf("sample type list is empty")
+ }
+ defaultSampleType := sTypes[0]
+ reMap, needToModify := make([]int, len(sTypes)), false
+ for i, st := range sTypes {
+ if st == p.DefaultSampleType {
+ defaultSampleType = p.DefaultSampleType
+ }
+ idx := searchValueType(p.SampleType, st)
+ if idx < 0 {
+ return fmt.Errorf("%q sample type is not found in profile", st)
+ }
+ reMap[i] = idx
+ if idx != i {
+ needToModify = true
+ }
+ }
+ if !needToModify && len(sTypes) == len(p.SampleType) {
+ return nil
+ }
+ p.DefaultSampleType = defaultSampleType
+ oldSampleTypes := p.SampleType
+ p.SampleType = make([]*ValueType, len(sTypes))
+ for i, idx := range reMap {
+ p.SampleType[i] = oldSampleTypes[idx]
+ }
+ values := make([]int64, len(sTypes))
+ for _, s := range p.Sample {
+ for i, idx := range reMap {
+ values[i] = s.Value[idx]
+ }
+ s.Value = s.Value[:len(values)]
+ copy(s.Value, values)
+ }
+ return nil
+}
+
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
new file mode 100644
index 00000000..60ef7e92
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -0,0 +1,856 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+ SampleType []*ValueType
+ DefaultSampleType string
+ Sample []*Sample
+ Mapping []*Mapping
+ Location []*Location
+ Function []*Function
+ Comments []string
+
+ DropFrames string
+ KeepFrames string
+
+ TimeNanos int64
+ DurationNanos int64
+ PeriodType *ValueType
+ Period int64
+
+ // The following fields are modified during encoding and copying,
+ // so are protected by a Mutex.
+ encodeMu sync.Mutex
+
+ commentX []int64
+ dropFramesX int64
+ keepFramesX int64
+ stringTable []string
+ defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+ Type string // cpu, wall, inuse_space, etc
+ Unit string // seconds, nanoseconds, bytes, etc
+
+ typeX int64
+ unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+ Location []*Location
+ Value []int64
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
+ NumLabel map[string][]int64
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel, see the docs in profile.proto for details. When the value
+ // slice is present and not nil, its length must be equal to the length of
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
+
+ locationIDX []uint64
+ labelX []label
+}
+
+// label corresponds to Profile.Label
+type label struct {
+ keyX int64
+ // Exactly one of the two following values must be set
+ strX int64
+ numX int64 // Integer value for this label
+ // can be set if numX has a value
+ unitX int64
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+ ID uint64
+ Start uint64
+ Limit uint64
+ Offset uint64
+ File string
+ BuildID string
+ HasFunctions bool
+ HasFilenames bool
+ HasLineNumbers bool
+ HasInlineFrames bool
+
+ fileX int64
+ buildIDX int64
+
+ // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
+ // For linux kernel mappings generated by some tools, correct symbolization depends
+ // on knowing which of the two possible relocation symbols was used for `Start`.
+ // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
+ //
+ // Note, this public field is not persisted in the proto. For the purposes of
+ // copying / merging / hashing profiles, it is considered subsumed by `File`.
+ KernelRelocationSymbol string
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+ IsFolded bool
+
+ mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+ Function *Function
+ Line int64
+
+ functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+ ID uint64
+ Name string
+ SystemName string
+ Filename string
+ StartLine int64
+
+ nameX int64
+ systemNameX int64
+ filenameX int64
+}
+
+// Parse parses a profile and checks for its validity.
The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. +func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. 
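+//
+// For illustration (addresses are hypothetical): m1 spanning
+// [0x1000, 0x2000) at file offset 0x0 and m2 spanning [0x2000, 0x3000)
+// at file offset 0x1000 are adjacent, since m1.Limit == m2.Start and
+// m1.Offset+(m1.Limit-m1.Start) == m2.Offset.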
+func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes 
preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
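+//
+// The output has roughly this shape (values are illustrative,
+// abbreviated):
+//
+//	PeriodType: cpu nanoseconds
+//	Period: 10000000
+//	Samples:
+//	samples/count cpu/nanoseconds
+//	         1   10000000: 1 2
+//	Locations
+//	     1: 0x1040 M=1 main.main main.go:10 s=5
+//	Mappings
+//	1: 0x400000/0x500000/0x0 /bin/prog abc123 [FN][FL][LN]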
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" + if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. 
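+//
+// For example (hypothetical input), numLabels {"bytes": [512]} with
+// numUnits {"bytes": ["bytes"]} renders as "bytes:[512 bytes]"; with no
+// matching unit slice it falls back to "bytes:[512]".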
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
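+//
+// A short sketch (values are hypothetical): for a profile whose sample
+// types are [alloc_objects, alloc_space], p.ScaleN([]float64{2, 0.5})
+// doubles every sample's first value, halves its second, and drops
+// samples whose scaled values are all zero.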
+func (p *Profile) ScaleN(ratios []float64) error {
+	if len(p.SampleType) != len(ratios) {
+		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+	}
+	allOnes := true
+	for _, r := range ratios {
+		if r != 1 {
+			allOnes = false
+			break
+		}
+	}
+	if allOnes {
+		return nil
+	}
+	fillIdx := 0
+	for _, s := range p.Sample {
+		keepSample := false
+		for i, v := range s.Value {
+			if ratios[i] != 1 {
+				val := int64(math.Round(float64(v) * ratios[i]))
+				s.Value[i] = val
+				keepSample = keepSample || val != 0
+			}
+		}
+		if keepSample {
+			p.Sample[fillIdx] = s
+			fillIdx++
+		}
+	}
+	p.Sample = p.Sample[:fillIdx]
+	return nil
+}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+	for _, l := range p.Location {
+		if l.Mapping != nil && !l.Mapping.HasFunctions {
+			return false
+		}
+	}
+	return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+	for _, l := range p.Location {
+		if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+			return false
+		}
+	}
+	return true
+}
+
+// Unsymbolizable returns true if a mapping points to a binary for which
+// locations can't be symbolized in principle, at least for now. Examples are
+// "[vdso]", "[vsyscall]" and some others; see the code.
+func (m *Mapping) Unsymbolizable() bool {
+	name := filepath.Base(m.File)
+	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+	pp := &Profile{}
+	if err := unmarshal(serialize(p), pp); err != nil {
+		panic(err)
+	}
+	if err := pp.postDecode(); err != nil {
+		panic(err)
+	}
+
+	return pp
+}
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
new file mode 100644
index 00000000..a15696ba
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -0,0 +1,367 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a simple protocol buffer encoder and decoder.
+// The format is described at
+// https://developers.google.com/protocol-buffers/docs/encoding
+//
+// A protocol message must implement the message interface:
+//   decoder() []decoder
+//   encode(*buffer)
+//
+// The decode method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
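+//
+// As a concrete sketch of the wire encoding: the varint for 300 is
+// 0xAC 0x02 (low seven bits 0101100 with the continuation bit set give
+// 0xAC, the remaining bits 10 give 0x02), and a length-delimited field
+// with tag 1 is prefixed by the key byte (1<<3)|2 = 0x0A followed by its
+// varint-encoded length.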
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
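+	// No terminator or escaping is needed here: the decoder recovers the
+	// string purely from the length prefix written by encodeLength above.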
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for 
len(data) > 0 {
+			var u uint64
+			var err error
+
+			if u, data, err = decodeVarint(data); err != nil {
+				return err
+			}
+			*x = append(*x, u)
+		}
+		return nil
+	}
+	var u uint64
+	if err := decodeUint64(b, &u); err != nil {
+		return err
+	}
+	*x = append(*x, u)
+	return nil
+}
+
+func decodeString(b *buffer, x *string) error {
+	if err := checkType(b, 2); err != nil {
+		return err
+	}
+	*x = string(b.data)
+	return nil
+}
+
+func decodeStrings(b *buffer, x *[]string) error {
+	var s string
+	if err := decodeString(b, &s); err != nil {
+		return err
+	}
+	*x = append(*x, s)
+	return nil
+}
+
+func decodeBool(b *buffer, x *bool) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	if int64(b.u64) == 0 {
+		*x = false
+	} else {
+		*x = true
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
new file mode 100644
index 00000000..b2f9fd54
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -0,0 +1,194 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+var (
+	reservedNames = []string{"(anonymous namespace)", "operator()"}
+	bracketRx     = func() *regexp.Regexp {
+		var quotedNames []string
+		for _, name := range append(reservedNames, "(") {
+			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
+		}
+		return regexp.MustCompile(strings.Join(quotedNames, "|"))
+	}()
+)
+
+// simplifyFunc does some primitive simplification of function names.
+func simplifyFunc(f string) string {
+	// Account for leading '.' on the PPC ELF v1 ABI.
+	funcName := strings.TrimPrefix(f, ".")
+	// Account for unsimplified names -- try to remove the argument list by trimming
+	// starting from the first '(', but skipping reserved names that have '('.
+	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
+		foundReserved := false
+		for _, res := range reservedNames {
+			if funcName[ind[0]:ind[1]] == res {
+				foundReserved = true
+				break
+			}
+		}
+		if !foundReserved {
+			funcName = funcName[:ind[0]]
+			break
+		}
+	}
+	return funcName
+}
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+	prune := make(map[uint64]bool)
+	pruneBeneath := make(map[uint64]bool)
+
+	// simplifyFunc can be expensive, so cache results.
+	// Note that the same function name can be encountered many times due
+	// to different lines and addresses in the same function.
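+	// For example (the pattern is hypothetical),
+	// p.Prune(regexp.MustCompile(`runtime\..*`), nil) drops every frame
+	// whose simplified name matches, together with all frames it calls
+	// (everything leaf-ward of it in each stack).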
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
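+		// sample.Location[0] is the leaf frame; keeping sample.Location[i:]
+		// retains the matched frame and its callers while discarding the
+		// frames beneath it.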
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE new file mode 100644 index 00000000..9415ee72 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go new file mode 100644 index 00000000..a61021d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -0,0 +1,69 @@ +package config + +// GinkgoConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type GinkgoConfigType = DeprecatedGinkgoConfigType +type DeprecatedGinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusStrings []string + SkipStrings []string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +// DefaultReporterConfigType has been deprecated and its equivalent now lives in +// the types package. You can no longer access Ginkgo configuration from the config +// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the +// current configuration +// +// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error +// It will be removed in a future minor release of Ginkgo +type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType +type DeprecatedDefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool + ReportPassed bool + ReportFile string +} + +// Sadly there is no way to gracefully deprecate access to these global config variables. 
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} + +// Sadly there is no way to gracefully deprecate access to these global config variables. +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method +// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails +var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go new file mode 100644 index 00000000..778bfd7c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go @@ -0,0 +1,41 @@ +// +build !windows + +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +package formatter + +import ( + "io" + "os" +) + +func newColorable(file *os.File) io.Writer { + return file +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go new file mode 100644 index 00000000..dd1d143c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go @@ -0,0 +1,809 @@ +/* +These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com + + * go-colorable: + * go-isatty: + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package formatter + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +func isTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func newColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) 
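+		// Capture the console's current text attributes up front so SGR
+		// reset sequences can later restore them via w.oldattr.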
+ procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 
0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + 
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr 
|= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := 
float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go new file mode 100644 index 00000000..743555dd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -0,0 +1,230 @@ +package formatter + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" +) + +// ColorableStdOut and ColorableStdErr enable color output support on Windows +var ColorableStdOut = newColorable(os.Stdout) +var ColorableStdErr = newColorable(os.Stderr) + +const COLS = 80 + +type ColorMode uint8 + +const ( + ColorModeNone ColorMode = iota + ColorModeTerminal + ColorModePassthrough +) + +var SingletonFormatter = New(ColorModeTerminal) + +func F(format string, args ...interface{}) string { + return SingletonFormatter.F(format, args...) +} + +func Fi(indentation uint, format string, args ...interface{}) string { + return SingletonFormatter.Fi(indentation, format, args...) +} + +func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + return SingletonFormatter.Fiw(indentation, maxWidth, format, args...) 
+} + +type Formatter struct { + ColorMode ColorMode + colors map[string]string + styleRe *regexp.Regexp + preserveColorStylingTags bool +} + +func NewWithNoColorBool(noColor bool) Formatter { + if noColor { + return New(ColorModeNone) + } + return New(ColorModeTerminal) +} + +func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + + f := Formatter{ + ColorMode: colorMode, + colors: map[string]string{ + "/": "\x1b[0m", + "bold": "\x1b[1m", + "underline": "\x1b[4m", + + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), + }, + } + colors := []string{} + for color := range f.colors { + colors = append(colors, color) + } + f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}") + return f +} + +func (f Formatter) F(format string, args ...interface{}) string { + return f.Fi(0, format, args...) +} + +func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string { + return f.Fiw(indentation, 0, format, args...) +} + +func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) 
+ } + + if indentation == 0 && maxWidth == 0 { + return out + } + + lines := strings.Split(out, "\n") + + if maxWidth != 0 { + outLines := []string{} + + maxWidth = maxWidth - indentation*2 + for _, line := range lines { + if f.length(line) <= maxWidth { + outLines = append(outLines, line) + continue + } + words := strings.Split(line, " ") + outWords := []string{words[0]} + length := uint(f.length(words[0])) + for _, word := range words[1:] { + wordLength := f.length(word) + if length+wordLength+1 <= maxWidth { + length += wordLength + 1 + outWords = append(outWords, word) + continue + } + outLines = append(outLines, strings.Join(outWords, " ")) + outWords = []string{word} + length = wordLength + } + if len(outWords) > 0 { + outLines = append(outLines, strings.Join(outWords, " ")) + } + } + + lines = outLines + } + + if indentation == 0 { + return strings.Join(lines, "\n") + } + + padding := strings.Repeat(" ", int(indentation)) + for i := range lines { + if lines[i] != "" { + lines[i] = padding + lines[i] + } + } + + return strings.Join(lines, "\n") +} + +func (f Formatter) length(styled string) uint { + n := uint(0) + inStyle := false + for _, b := range styled { + if inStyle { + if b == 'm' { + inStyle = false + } + continue + } + if b == '\x1b' { + inStyle = true + continue + } + n += 1 + } + return n +} + +func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string { + if len(elements) == 0 { + return "" + } + n := len(cycle) + out := "" + for i, text := range elements { + out += cycle[i%n] + text + if i < len(elements)-1 { + out += joiner + } + } + out += "{{/}}" + return f.style(out) +} + +func (f Formatter) style(s string) string { + switch f.ColorMode { + case ColorModeNone: + return f.styleRe.ReplaceAllString(s, "") + case ColorModePassthrough: + return s + case ColorModeTerminal: + return f.styleRe.ReplaceAllStringFunc(s, func(match string) string { + if out, ok := f.colors[strings.Trim(match, "{}")]; ok { + return out + } + return match + }) + } + + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 00000000..2efd2860 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go 
new file mode 100644 index 00000000..12e0e565 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 00000000..88dd8d6b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + 
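+ // At this point args holds the CLI's own arguments; everything after the first "--" + // has been collected into additionalArgs and is passed through untouched to the command that ultimately runs (for Ginkgo suites, the compiled test binary).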
command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 00000000..a367a1fc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + 
{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 00000000..73aff0b7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom boostrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 00000000..be01dec9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,264 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. 
`package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate <filename(s)>", + ShortDoc: "Generate a test file named <filename>_test.go", + Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple <filename> to generate multiple files simultaneously. The resulting files are named <filename>_test.go. + +You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + BuildTags string + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + BuildTags: getBuildTags(conf.Tags), + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render bootstrap template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 00000000..4dab07d0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,43 @@ +package generators + +var specText = `{{.BuildTags}} +package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 00000000..28c7aa6f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,76 @@ +package generators + +import ( + "fmt" + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string + Tags string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if 
isDigitErr == nil { + return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} + +// getBuildTags returns the resultant string to be added. +// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 00000000..86da7340 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,161 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
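+ // The compiler is invoked from the suite's own directory (cmd.Dir below), so the "./" package pattern in the generated arguments resolves to the package under test.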
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 00000000..bd3c6d02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { 
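+ // A process number of 0 refers to the suite as a whole (serial runs and merged assets); parallel processes are numbered from 1 and their assets get a ".N" suffix.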
+ suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programmatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programmatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, 
TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) + if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +//loads each profile, combines them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. 
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 00000000..41052ea1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, 
goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. + if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
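+ // "--test.timeout=0" is prepended so the testing package never imposes its own deadline; suite-level timeouts remain the Ginkgo CLI's responsibility.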
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 00000000..df99875b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,284 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run because go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...)
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.MatchString(file.Name()) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.MatchString(file.Name()) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.MatchString(file.Name()) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 00000000..bd9ca7d5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 00000000..9da1bab3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versionRe.FindStringSubmatch(components[1]) + if matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI, that is currently unsupported.\n{{/}}")) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 00000000..6c61f09d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels <FLAGS> <PACKAGES>", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) +
} + } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 00000000..e9abb27d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 00000000..958daccb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,300 @@ +package outline + +import ( + "github.com/onsi/ginkgo/v2/types" + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
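+// For example (illustrative, assuming a dot-import of ginkgo): the call FIt("works", func() {}) +// maps to a node with Name "FIt", Text "works", Spec true, and Focused true.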
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. 
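+// For example (illustrative): It("adds two numbers", ...) yields ("adds two numbers", true), +// while It(someVariable, ...) yields ("", false), since only basic literals can be +// resolved without evaluating the program.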
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} + +func labelFromCallExpr(ce *ast.CallExpr) []string { + + labels := []string{} + if len(ce.Args) < 2 { + return labels + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Label" { + ls := extractLabels(expr) + labels = append(labels, ls...) + } + } + } + return labels +} + +func extractLabels(expr *ast.CallExpr) []string { + out := []string{} + for _, arg := range expr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + + return out +} + +func pendingFromCallExpr(ce *ast.CallExpr) bool { + + pending := false + if len(ce.Args) < 2 { + return pending + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Pending" { + pending = true + } + case *ast.Ident: + if expr.Name == "Pending" { + pending = true + } + } + } + return pending +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 00000000..67ec5ab7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." 
{ + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 00000000..c2327cda --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,110 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Specs and containers are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formatted outline, but every line is indented by +// one 'width' of spaces for every level of nesting. 
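+// +// For example (illustrative offsets, width 4), an It nested in a Describe renders as: +// +//	Name,Text,Start,End,Spec,Focused,Pending,Labels +//	Describe,outer,0,120,false,false,false,"" +//	    It,works,15,60,true,false,false,""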
+func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + var labels string + if len(n.Labels) == 1 { + labels = n.Labels[0] + } else { + labels = strings.Join(n.Labels, ", ") + } + // enclose labels in a double-quoted, comma-separated list so that, when imported into a CSV app, the Labels column holds comma-separated strings + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 00000000..36698d46 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. + stdinAlias = "-" + usageCommand = "ginkgo outline <FILENAME>" +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline <FILENAME>", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index 00000000..aaed4d57
--- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", + ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped! 
Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 00000000..7dd29439 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func 
unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 00000000..6c485c5b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 00000000..26418ac6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 00000000..a34d9435 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } +} + 
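+// addDepsForDep imports the package in the dep directory and records its direct +// imports at the given depth; together with addDepsForDepth this is a breadth-first +// walk that NewDependencies stops once a pass adds no new packages.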
+func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 00000000..17d052bd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.MatchString(info.Name()) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.MatchString(info.Name()) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 00000000..b4892beb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths map[string]bool + 
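+ // usedPaths is non-nil only between StartTrackingUsage and + // StopTrackingUsageAndPrune; Add and Get record paths in it so that + // unreferenced hashes can be pruned when tracking stops.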
watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 00000000..53272df7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if 
includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 00000000..bde4193c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,192 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", + ShortDoc: "Watch the passed in <PACKAGES> and run their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted() { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go new file mode 100644 index 00000000..8ed86111 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -0,0 +1,177 @@ +package interrupt_handler + +import ( + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/internal/parallel_support" +) + +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond + +type InterruptCause uint + +const ( + InterruptCauseInvalid InterruptCause = iota + InterruptCauseSignal + InterruptCauseAbortByOtherProcess +) + +type InterruptLevel uint + +const ( + InterruptLevelUninterrupted InterruptLevel = iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + +func (ic InterruptCause) String() string { + switch ic { + case InterruptCauseSignal: + return "Interrupted by User" + case InterruptCauseAbortByOtherProcess: + return "Interrupted by Other Ginkgo Process" + } + return "INVALID_INTERRUPT_CAUSE" +} + +type InterruptStatus struct { + Channel chan interface{} + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess +} + +type InterruptHandlerInterface interface { + Status() InterruptStatus +} + +type InterruptHandler struct { + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal + requestAbortCheck chan interface{} +} + +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} + } + handler := &InterruptHandler{ + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + requestAbortCheck: 
make(chan interface{}), + client: client, + signals: signals, + } + handler.registerForInterrupts() + return handler +} + +func (handler *InterruptHandler) Stop() { + close(handler.stop) +} + +func (handler *InterruptHandler) registerForInterrupts() { + // os signal handling + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, handler.signals...) + + // cross-process abort handling + var abortChannel chan interface{} + if handler.client != nil { + abortChannel = make(chan interface{}) + go func() { + pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) + for { + select { + case <-pollTicker.C: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.stop: + pollTicker.Stop() + return + } + } + }() + } + + go func(abortChannel chan interface{}) { + var interruptCause InterruptCause + for { + select { + case <-signalChannel: + interruptCause = InterruptCauseSignal + case <-abortChannel: + interruptCause = InterruptCauseAbortByOtherProcess + case <-handler.stop: + signal.Stop(signalChannel) + return + } + abortChannel = nil + + handler.lock.Lock() + oldLevel := handler.level + handler.cause = interruptCause + if handler.level == InterruptLevelUninterrupted { + handler.level = InterruptLevelCleanupAndReport + } else if handler.level == InterruptLevelCleanupAndReport { + handler.level = InterruptLevelReportOnly + } else if handler.level == InterruptLevelReportOnly { + handler.level = InterruptLevelBailOut + } + if handler.level != oldLevel { + close(handler.c) + handler.c = make(chan interface{}) + } + handler.lock.Unlock() + } + }(abortChannel) +} + +func (handler *InterruptHandler) Status() InterruptStatus { + handler.lock.Lock() + status := InterruptStatus{ + Level: handler.level, + Channel: handler.c, + Cause: handler.cause, + } + handler.lock.Unlock() + + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() + } + + return status +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go new file mode 100644 index 00000000..bf0de496 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go @@ -0,0 +1,15 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package interrupt_handler + +import ( + "os" + "os/signal" + "syscall" +) + +func SwallowSigQuit() { + c := make(chan os.Signal, 1024) + signal.Notify(c, syscall.SIGQUIT) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go new file mode 100644 index 00000000..fcf8da83 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package interrupt_handler + +func SwallowSigQuit() { + //noop +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go new file mode 100644 index 00000000..b3cd6429 --- /dev/null 
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -0,0 +1,72 @@ +package parallel_support + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type BeforeSuiteState struct { + Data []byte + State types.SpecState +} + +type ParallelIndexCounter struct { + Index int +} + +var ErrorGone = fmt.Errorf("gone") +var ErrorFailed = fmt.Errorf("failed") +var ErrorEarly = fmt.Errorf("early") + +var POLLING_INTERVAL = 50 * time.Millisecond + +type Server interface { + Start() + Close() + Address() string + RegisterAlive(node int, alive func() bool) + GetSuiteDone() chan interface{} + GetOutputDestination() io.Writer + SetOutputDestination(io.Writer) +} + +type Client interface { + Connect() bool + Close() error + + PostSuiteWillBegin(report types.Report) error + PostDidRun(report types.SpecReport) error + PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) + PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error + BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) + BlockUntilNonprimaryProcsHaveFinished() error + BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) + FetchNextCounter() (int, error) + PostAbort() error + ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error + Write(p []byte) (int, error) +} + +func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpServer(parallelTotal, reporter) + } else { + return newRPCServer(parallelTotal, reporter) + } +} + +func NewClient(serverHost string) Client { + if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { + return newHttpClient(serverHost) + } else { + return newRPCClient(serverHost) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go new file mode 100644 index 00000000..6547c7a6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -0,0 +1,169 @@ +package parallel_support + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type httpClient struct { + serverHost string +} + +func newHttpClient(serverHost string) *httpClient { + return &httpClient{ + serverHost: serverHost, + } +} + +func (client *httpClient) Connect() bool { + resp, err := http.Get(client.serverHost + "/up") + if err != nil { + return false + } + resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +func (client *httpClient) Close() error { + return nil +} + +func (client *httpClient) post(path string, data interface{}) error { + var body io.Reader + if data != nil { + encoded, err := json.Marshal(data) + if err != nil { + return err + } + body = bytes.NewBuffer(encoded) + } + resp, err := http.Post(client.serverHost+path, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (client *httpClient) poll(path string, data interface{}) error { + for { + resp, err := http.Get(client.serverHost + path) + if err != nil { + return err + } + if resp.StatusCode == 
http.StatusTooEarly { + resp.Body.Close() + time.Sleep(POLLING_INTERVAL) + continue + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusGone { + return ErrorGone + } + if resp.StatusCode == http.StatusFailedDependency { + return ErrorFailed + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("received unexpected status code %d", resp.StatusCode) + } + if data != nil { + return json.NewDecoder(resp.Body).Decode(data) + } + return nil + } +} + +func (client *httpClient) PostSuiteWillBegin(report types.Report) error { + return client.post("/suite-will-begin", report) +} + +func (client *httpClient) PostDidRun(report types.SpecReport) error { + return client.post("/did-run", report) +} + +func (client *httpClient) PostSuiteDidEnd(report types.Report) error { + return client.post("/suite-did-end", report) +} + +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.post("/before-suite-completed", beforeSuiteState) +} + +func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("/before-suite-state", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("/have-nonprimary-procs-finished", nil) +} + +func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("/aggregated-nonprimary-procs-report", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *httpClient) FetchNextCounter() (int, error) { + var counter ParallelIndexCounter + err := client.poll("/counter", &counter) + return counter.Index, err +} + +func (client *httpClient) PostAbort() error { + return client.post("/abort", nil) +} + +func (client *httpClient) ShouldAbort() bool { + err := client.poll("/abort", nil) + if err == ErrorGone { + return true + } + return false +} + +func (client *httpClient) Write(p []byte) (int, error) { + resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("failed to emit output") + } + return len(p), err +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go new file mode 100644 index 00000000..d2c71ab1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ 
-0,0 +1,242 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "encoding/json" + "io" + "net" + "net/http" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +/* +httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. +*/ +type httpServer struct { + listener net.Listener + handler *ServerHandler +} + +// Create a new server, automatically selecting a port +func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &httpServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +// Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *httpServer) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) + mux.HandleFunc("/did-run", server.didRun) + mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) + mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) + + //synchronization endpoints + mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) + mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) + mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) + mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) + mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) + mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/up", server.handleUp) + mux.HandleFunc("/abort", server.handleAbort) + + go httpServer.Serve(server.listener) +} + +// Stop the server +func (server *httpServer) Close() { + server.listener.Close() +} + +// The address the server can be reached at. Pass this into the `ForwardingReporter`. 
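+// (Typically the Ginkgo CLI hands this address to its worker processes, which reach the server via NewClient in client_server.go.)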
+func (server *httpServer) Address() string { + return "http://" + server.listener.Addr().String() +} + +func (server *httpServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *httpServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *httpServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *httpServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} + +// +// Streaming Endpoints +// + +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { + defer request.Body.Close() + if json.NewDecoder(request.Body).Decode(object) != nil { + writer.WriteHeader(http.StatusBadRequest) + return false + } + return true +} + +func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { + if err == nil { + return false + } + switch err { + case ErrorEarly: + writer.WriteHeader(http.StatusTooEarly) + case ErrorGone: + writer.WriteHeader(http.StatusGone) + case ErrorFailed: + writer.WriteHeader(http.StatusFailedDependency) + default: + writer.WriteHeader(http.StatusInternalServerError) + } + return true +} + +func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) +} + +func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { + var report types.SpecReport + if !server.decode(writer, request, &report) { + return + } + + server.handleError(server.handler.DidRun(report, voidReceiver), writer) +} + +func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + var report types.Report + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) +} + +func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { + output, err := io.ReadAll(request.Body) + if err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + var n int + server.handleError(server.handler.EmitOutput(output, &n), writer) +} + +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + +func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if !server.decode(writer, request, 
&beforeSuiteState) { + return + } + + server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) +} + +func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var beforeSuiteState BeforeSuiteState + if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { + return + } + json.NewEncoder(writer).Encode(beforeSuiteState) +} + +func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { + if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) { + return + } + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { + var aggregatedReport types.Report + if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { + return + } + json.NewEncoder(writer).Encode(aggregatedReport) +} + +func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { + var n int + if server.handleError(server.handler.Counter(voidSender, &n), writer) { + return + } + json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) +} + +func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(http.StatusOK) +} + +func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { + if request.Method == "GET" { + var shouldAbort bool + server.handler.ShouldAbort(voidSender, &shouldAbort) + if shouldAbort { + writer.WriteHeader(http.StatusGone) + } else { + writer.WriteHeader(http.StatusOK) + } + } else { + server.handler.Abort(voidSender, voidReceiver) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go new file mode 100644 index 00000000..59e8e6fd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -0,0 +1,136 @@ +package parallel_support + +import ( + "net/rpc" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +type rpcClient struct { + serverHost string + client *rpc.Client +} + +func newRPCClient(serverHost string) *rpcClient { + return &rpcClient{ + serverHost: serverHost, + } +} + +func (client *rpcClient) Connect() bool { + var err error + if client.client != nil { + return true + } + client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") + if err != nil { + client.client = nil + return false + } + return true +} + +func (client *rpcClient) Close() error { + return client.client.Close() +} + +func (client *rpcClient) poll(method string, data interface{}) error { + for { + err := client.client.Call(method, voidSender, data) + if err == nil { + return nil + } + switch err.Error() { + case ErrorEarly.Error(): + time.Sleep(POLLING_INTERVAL) + case ErrorGone.Error(): + return ErrorGone + case ErrorFailed.Error(): + return ErrorFailed + default: + return err + } + } +} + +func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { + return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) +} + +func (client *rpcClient) PostDidRun(report types.SpecReport) error { + return client.client.Call("Server.DidRun", report, voidReceiver) +} + +func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { + return 
client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) +} + +func (client *rpcClient) Write(p []byte) (int, error) { + var n int + err := client.client.Call("Server.EmitOutput", p, &n) + return n, err +} + +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + +func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { + beforeSuiteState := BeforeSuiteState{ + State: state, + Data: data, + } + return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) +} + +func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { + var beforeSuiteState BeforeSuiteState + err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) + if err == ErrorGone { + return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() + } + return beforeSuiteState.State, beforeSuiteState.Data, err +} + +func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { + return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) +} + +func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { + var report types.Report + err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) + if err == ErrorGone { + return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() + } + return report, err +} + +func (client *rpcClient) FetchNextCounter() (int, error) { + var counter int + err := client.client.Call("Server.Counter", voidSender, &counter) + return counter, err +} + +func (client *rpcClient) PostAbort() error { + return client.client.Call("Server.Abort", voidSender, voidReceiver) +} + +func (client *rpcClient) ShouldAbort() bool { + var shouldAbort bool + client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) + return shouldAbort +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go new file mode 100644 index 00000000..2620fd56 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go @@ -0,0 +1,75 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser). + +*/ + +package parallel_support + +import ( + "io" + "net" + "net/http" + "net/rpc" + + "github.com/onsi/ginkgo/v2/reporters" +) + +/* +RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. 
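+ +Which flavor is used is decided by the GINKGO_PARALLEL_PROTOCOL environment variable in client_server.go. A minimal sketch of the lifecycle, using only the exported pieces defined there: + + server, _ := NewServer(parallelTotal, reporter) + server.Start() + client := NewClient(server.Address()) + client.Connect()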
+*/ +type RPCServer struct { + listener net.Listener + handler *ServerHandler +} + +//Create a new server, automatically selecting a port +func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &RPCServer{ + listener: listener, + handler: newServerHandler(parallelTotal, reporter), + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *RPCServer) Start() { + rpcServer := rpc.NewServer() + rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server + + httpServer := &http.Server{} + httpServer.Handler = rpcServer + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *RPCServer) Close() { + server.listener.Close() +} + +//The address the server can be reached at. Pass this into the `ForwardingReporter`. +func (server *RPCServer) Address() string { + return server.listener.Addr().String() +} + +func (server *RPCServer) GetSuiteDone() chan interface{} { + return server.handler.done +} + +func (server *RPCServer) GetOutputDestination() io.Writer { + return server.handler.outputDestination +} + +func (server *RPCServer) SetOutputDestination(w io.Writer) { + server.handler.outputDestination = w +} + +func (server *RPCServer) RegisterAlive(node int, alive func() bool) { + server.handler.registerAlive(node, alive) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go new file mode 100644 index 00000000..a6d98793 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -0,0 +1,234 @@ +package parallel_support + +import ( + "io" + "os" + "sync" + + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +type Void struct{} + +var voidReceiver *Void = &Void{} +var voidSender Void + +// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. 
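+// (net/rpc can only register exported methods of the form Method(arg T, reply *T2) error, which is why every handler below takes a Void sender or receiver even when unused.)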
+// It handles all the business logic to avoid duplication between the two servers + +type ServerHandler struct { + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool + + numSuiteDidBegins int + numSuiteDidEnds int + aggregatedReport types.Report + reportHoldingArea []types.SpecReport +} + +func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { + return &ServerHandler{ + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + + parallelTotal: parallelTotal, + outputDestination: os.Stdout, + done: make(chan interface{}), + } +} + +func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidBegins += 1 + + // all summaries are identical, so it's fine to simply emit the last one of these + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.SuiteWillBegin(report) + + for _, summary := range handler.reportHoldingArea { + handler.reporter.WillRun(summary) + handler.reporter.DidRun(summary) + } + + handler.reportHoldingArea = nil + } + + return nil +} + +func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + if handler.numSuiteDidBegins == handler.parallelTotal { + handler.reporter.WillRun(report) + handler.reporter.DidRun(report) + } else { + handler.reportHoldingArea = append(handler.reportHoldingArea, report) + } + + return nil +} + +func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + + handler.numSuiteDidEnds += 1 + if handler.numSuiteDidEnds == 1 { + handler.aggregatedReport = report + } else { + handler.aggregatedReport = handler.aggregatedReport.Add(report) + } + + if handler.numSuiteDidEnds == handler.parallelTotal { + handler.reporter.SuiteDidEnd(handler.aggregatedReport) + close(handler.done) + } + + return nil +} + +func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { + var err error + *n, err = handler.outputDestination.Write(output) + return err +} + +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + +func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.alives[proc-1] = alive +} + +func (handler *ServerHandler) procIsAlive(proc int) bool { + handler.lock.Lock() + defer handler.lock.Unlock() + alive := handler.alives[proc-1] + if alive == nil { + return true + } + return alive() +} + +func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { + for i := 2; i <= handler.parallelTotal; i++ { + if handler.procIsAlive(i) { + return false + } + } + return true +} + +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) 
ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + +func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.beforeSuiteState = beforeSuiteState + + return nil +} + +func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.beforeSuiteState.State == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *beforeSuiteState = handler.beforeSuiteState + return nil +} + +func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { + if handler.haveNonprimaryProcsFinished() { + return nil + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { + if handler.haveNonprimaryProcsFinished() { + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.numSuiteDidEnds == handler.parallelTotal-1 { + *report = handler.aggregatedReport + return nil + } else { + return ErrorGone + } + } else { + return ErrorEarly + } +} + +func (handler *ServerHandler) Counter(_ Void, counter *int) error { + handler.counterLock.Lock() + defer handler.counterLock.Unlock() + *counter = handler.counter + handler.counter++ + return nil +} + +func (handler *ServerHandler) Abort(_ Void, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.shouldAbort = true + return nil +} + +func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { + handler.lock.Lock() + defer handler.lock.Unlock() + *shouldAbort = handler.shouldAbort + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go new file mode 100644 index 00000000..56b7be75 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -0,0 +1,759 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. 
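+For example, -v and -vv raise verbosity while --no-color strips the ANSI color codes.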
+ +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "fmt" + "io" + "runtime" + "strings" + "sync" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type DefaultReporter struct { + conf types.ReporterConfig + writer io.Writer + + // managing the emission stream + lastCharWasNewline bool + lastEmissionWasDelimiter bool + + // rendering + specDenoter string + retryDenoter string + formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex +} + +func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := NewDefaultReporter(conf, writer) + reporter.formatter = formatter.New(formatter.ColorModePassthrough) + + return reporter +} + +func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { + reporter := &DefaultReporter{ + conf: conf, + writer: writer, + + lastCharWasNewline: true, + lastEmissionWasDelimiter: false, + + specDenoter: "•", + retryDenoter: "↺", + formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, + } + if runtime.GOOS == "windows" { + reporter.specDenoter = "+" + reporter.retryDenoter = "R" + } + + return reporter +} + +/* The Reporter Interface */ + +func (r *DefaultReporter) SuiteWillBegin(report types.Report) { + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) { + r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription)) + if len(report.SuiteLabels) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) + } + r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) + } + } else { + banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath) + r.emitBlock(banner) + bannerWidth := len(banner) + if len(report.SuiteLabels) > 0 { + labels := strings.Join(report.SuiteLabels, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels)) + if len(labels)+2 > bannerWidth { + bannerWidth = len(labels) + 2 + } + } + r.emitBlock(strings.Repeat("=", bannerWidth)) + + out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) + if report.SuiteConfig.RandomizeAllSpecs { + out += r.f(" - will randomize all specs") + } + r.emitBlock(out) + r.emit("\n") + r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) + if report.SuiteConfig.ParallelTotal > 1 { + r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal)) + } + } +} + +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + 
highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) + return + } + + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" + } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) + } else { + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + } + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) + } + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + } +} + +func (r *DefaultReporter) WillRun(report types.SpecReport) { + v := r.conf.Verbosity() + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} + +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + + header := r.specDenoter + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
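+ // (only if it has not already streamed live, and the run is verbose or the spec failed)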
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } + } + + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
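+ // (shown when the timeline was not streamed live, and in any case for specs that did not pass)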
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) + + switch report.State { + case types.SpecStatePassed: + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + header = fmt.Sprintf("%s PASSED", header) + } + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true + } + case types.SpecStatePending: + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true + } + case types.SpecStateSkipped: + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) + } + } + + // If we have no content to show, just emit the header and return + if !reportHasContent { + r.emit(r.f(highlightColor + header + "{{/}}")) + return + } + + if includeRuntime { + header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) + } + + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } + + //Emit Stdout/Stderr Output + if showSeparateStdSection { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + } + + if showSeparateVisibilityAlwaysReportsSection { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + } + + if showTimeline { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) + r.emitTimeline(1, report, timeline) + r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + } + + // Emit Failure Message + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { + r.emitBlock("\n") + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. 
To view them in detail run {{bold}}ginkgo -vv{{/}}")) + } + } + + r.emitDelimiter(0) +} + +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" + } +} + +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) +} + +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) + } + } + } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} + +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) + } +} + +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) + r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) + } + + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } + + if 
!failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) + } + + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter(1) + + if report.RunningInParallel { + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 + } + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range report.AdditionalReports { + r.emit(r.fi(indent+1, additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + 
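+ // goroutines with highlighted stack frames get their own "Goroutines of Interest" section below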
if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) + } +} + +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return + } + r.emitReportEntry(1, entry) +} + +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. 
Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + r._emit(s, false, false) +} + +func (r *DefaultReporter) emitBlock(s string) { + r._emit(s, true, false) +} + +func (r *DefaultReporter) emitDelimiter(indent uint) { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) +} + +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { + return + } + r.lock.Lock() + defer r.lock.Unlock() + if isDelimiter && r.lastEmissionWasDelimiter { + return + } + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter +} + +/* Rendering text */ +func (r *DefaultReporter) f(format string, args ...interface{}) string { + return r.formatter.F(format, args...) +} + +func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string { + return r.formatter.Fi(indentation, format, args...) +} + +func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { + return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) +} + +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { + texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} + texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
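// For illustration only (an assumed example, not part of the vendored file): for a spec declared as
//
//	Describe("Books", func() {
//		When("borrowed", func() {
//			It("is due in two weeks", func() { /* ... */ })
//		})
//	})
//
// texts, locations, and labels now hold one entry per container ("Books", "borrowed");
// the leaf node entry is appended next, and the non-verbose branch below joins
// everything into a single cycled-color line ending with the leaf's code location.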
+ + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { + texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) + } else { + texts = append(texts, r.f(report.LeafNodeText)) + } + labels = append(labels, report.LeafNodeLabels) + locations = append(locations, report.LeafNodeLocation) + + failureLocation := report.Failure.FailureNodeLocation + if usePreciseFailureLocation { + failureLocation = report.Failure.Location + } + + highlightIndex := -1 + switch report.Failure.FailureNodeContext { + case types.FailureNodeAtTopLevel: + texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) + locations = append([]types.CodeLocation{failureLocation}, locations...) + labels = append([][]string{{}}, labels...) + highlightIndex = 0 + case types.FailureNodeInContainer: + i := report.Failure.FailureNodeContainerIndex + texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType) + locations[i] = failureLocation + highlightIndex = i + case types.FailureNodeIsLeafNode: + i := len(texts) - 1 + texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText) + locations[i] = failureLocation + highlightIndex = i + default: + //there is no failure, so we highlight the leaf node + highlightIndex = len(texts) - 1 + } + + out := "" + if veryVerbose { + for i := range texts { + if i == highlightIndex { + out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i]) + } else { + out += r.fi(uint(i), "%s", texts[i]) + } + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } else { + for i := range texts { + style := "{{/}}" + if i%2 == 1 { + style = "{{gray}}" + } + if i == highlightIndex { + style = highlightColor + "{{bold}}" + } + out += r.f(style+"%s", texts[i]) + if i < len(texts)-1 { + out += " " + } else { + out += r.f("{{/}}") + } + } + flattenedLabels := report.Labels() + if len(flattenedLabels) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) + } + out += "\n" + if usePreciseFailureLocation { + out += r.f("{{gray}}%s{{/}}", failureLocation) + } else { + leafLocation := locations[len(locations)-1] + if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) { + out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation) + out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation) + } else { + out += r.f("{{gray}}%s{{/}}", leafLocation) + } + } + + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go new file mode 100644 index 00000000..613072eb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -0,0 +1,149 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters +// this has been removed in V2. +// Please read the documentation at: +// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters +// for Ginkgo's new behavior and for a migration path. 
+type DeprecatedReporter interface { + SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SuiteDidEnd(summary *types.SuiteSummary) +} + +// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and +// calls the custom reporter's methods with appropriately transformed data from the V2 report. +// +// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()` +// +// Deprecated: ReportViaDeprecatedReporter method exists to help developers bridge between deprecated V1 functionality and the new +// reporting support in V2. It will be removed in a future minor version of Ginkgo. +func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) { + conf := config.DeprecatedGinkgoConfigType{ + RandomSeed: report.SuiteConfig.RandomSeed, + RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs, + FocusStrings: report.SuiteConfig.FocusStrings, + SkipStrings: report.SuiteConfig.SkipStrings, + FailOnPending: report.SuiteConfig.FailOnPending, + FailFast: report.SuiteConfig.FailFast, + FlakeAttempts: report.SuiteConfig.FlakeAttempts, + EmitSpecProgress: false, + DryRun: report.SuiteConfig.DryRun, + ParallelNode: report.SuiteConfig.ParallelProcess, + ParallelTotal: report.SuiteConfig.ParallelTotal, + SyncHost: report.SuiteConfig.ParallelHost, + StreamHost: report.SuiteConfig.ParallelHost, + } + + summary := &types.DeprecatedSuiteSummary{ + SuiteDescription: report.SuiteDescription, + SuiteID: report.SuitePath, + + NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs, + NumberOfTotalSpecs: report.PreRunStats.TotalSpecs, + NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun, + } + + reporter.SuiteWillBegin(conf, summary) + + for _, spec := range report.SpecReports { + switch spec.LeafNodeType { + case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.BeforeSuiteDidRun(setupSummary) + case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: + setupSummary := &types.DeprecatedSetupSummary{ + ComponentType: spec.LeafNodeType, + CodeLocation: spec.LeafNodeLocation, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.AfterSuiteDidRun(setupSummary) + case types.NodeTypeIt: + componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{} + componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...) + componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...) 
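// For illustration only (an assumed usage sketch, not part of the vendored file): per the
// doc comment on ReportViaDeprecatedReporter above, a V1 custom reporter is bridged like so:
//
//	var _ = ReportAfterSuite("bridge v1 reporter", func(report types.Report) {
//		reporters.ReportViaDeprecatedReporter(myV1Reporter, report)
//	})
//
// where myV1Reporter is a hypothetical value satisfying the DeprecatedReporter interface above.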
+ componentTexts = append(componentTexts, spec.LeafNodeText) + componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation) + + specSummary := &types.DeprecatedSpecSummary{ + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.State, + RunTime: spec.RunTime, + Failure: failureFor(spec), + NumberOfSamples: spec.NumAttempts, + CapturedOutput: spec.CombinedOutput(), + SuiteID: report.SuitePath, + } + reporter.SpecWillRun(specSummary) + reporter.SpecDidComplete(specSummary) + + switch spec.State { + case types.SpecStatePending: + summary.NumberOfPendingSpecs += 1 + case types.SpecStateSkipped: + summary.NumberOfSkippedSpecs += 1 + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted: + summary.NumberOfFailedSpecs += 1 + case types.SpecStatePassed: + summary.NumberOfPassedSpecs += 1 + if spec.NumAttempts > 1 { + summary.NumberOfFlakedSpecs += 1 + } + } + } + } + + summary.SuiteSucceeded = report.SuiteSucceeded + summary.RunTime = report.RunTime + + reporter.SuiteDidEnd(summary) +} + +func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure { + if spec.Failure.IsZero() { + return types.DeprecatedSpecFailure{} + } + + index := 0 + switch spec.Failure.FailureNodeContext { + case types.FailureNodeInContainer: + index = spec.Failure.FailureNodeContainerIndex + case types.FailureNodeAtTopLevel: + index = -1 + case types.FailureNodeIsLeafNode: + index = len(spec.ContainerHierarchyTexts) - 1 + if spec.LeafNodeText != "" { + index += 1 + } + } + + return types.DeprecatedSpecFailure{ + Message: spec.Failure.Message, + Location: spec.Failure.Location, + ForwardedPanic: spec.Failure.ForwardedPanic, + ComponentIndex: index, + ComponentType: spec.Failure.FailureNodeType, + ComponentCodeLocation: spec.Failure.FailureNodeLocation, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go new file mode 100644 index 00000000..5d3e8db9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -0,0 +1,69 @@ +package reporters + +import ( + "encoding/json" + "fmt" + "os" + "path" + + "github.com/onsi/ginkgo/v2/types" +) + +// GenerateJSONReport produces a JSON-formatted report at the passed in destination +func GenerateJSONReport(report types.Report, destination string) error { + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } + f, err := os.Create(destination) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode([]types.Report{ + report, + }) + if err != nil { + return err + } + return nil +} + +// MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to decode but reports on them via the returned messages []string +func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { + messages := []string{} + allReports := []types.Report{} + for _, source := range sources { + reports := []types.Report{} + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = json.Unmarshal(data, &reports) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + allReports = 
append(allReports, reports...) + } + + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } + f, err := os.Create(destination) + if err != nil { + return messages, err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(allReports) + if err != nil { + return messages, err + } + return messages, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go new file mode 100644 index 00000000..81604220 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -0,0 +1,376 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/ + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "os" + "path" + "strings" + + "github.com/onsi/ginkgo/v2/config" + "github.com/onsi/ginkgo/v2/types" +) + +type JunitReportConfig struct { + // Spec States for which no timeline should be emitted for system-err + // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs + OmitTimelinesForSpecState types.SpecState + + // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags + OmitFailureMessageAttr bool + + //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out + OmitCapturedStdOutErr bool + + // Enable OmitSpecLabels to prevent labels from appearing in the spec name + OmitSpecLabels bool + + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name + OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool +} + +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending and/or skipped + Disabled int `xml:"disabled,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute all test suites + Time float64 `xml:"time,attr"` + + //The set of all test suites + TestSuites []JUnitTestSuite `xml:"testsuite"` +} + +type JUnitTestSuite struct { + // Name maps onto the description of the test suite - maps onto Report.SuiteDescription + Name string `xml:"name,attr"` + // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath + Package string `xml:"package,attr"` + // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) + Tests int `xml:"tests,attr"` + // Disabled maps onto specs that are pending + Disabled int `xml:"disabled,attr"` + // Skipped maps onto specs that are skipped + Skipped int `xml:"skipped,attr"` + // Errors maps onto specs that panicked or were interrupted + Errors int `xml:"errors,attr"` + // Failures maps onto specs that failed + Failures int `xml:"failures,attr"` + // Time is the time in seconds to execute the test suite - maps onto Report.RunTime + Time float64 `xml:"time,attr"` + // Timestamp is the ISO 8601 formatted 
start-time of the suite - maps onto Report.StartTime + Timestamp string `xml:"timestamp,attr"` + + //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs + Properties JUnitProperties `xml:"properties"` + + //TestCases capture the individual specs + TestCases []JUnitTestCase `xml:"testcase"` +} + +type JUnitProperties struct { + Properties []JUnitProperty `xml:"property"` +} + +func (jup JUnitProperties) WithName(name string) string { + for _, property := range jup.Properties { + if property.Name == name { + return property.Value + } + } + return "" +} + +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +type JUnitTestCase struct { + // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" + Name string `xml:"name,attr"` + // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription + Classname string `xml:"classname,attr"` + // Status maps onto the string representation of SpecReport.State + Status string `xml:"status,attr"` + // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime + Time float64 `xml:"time,attr"` + //Skipped is populated with a message if the test was skipped or pending + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + //Error is populated if the test panicked or was interrupted + Error *JUnitError `xml:"error,omitempty"` + //Failure is populated if the test failed + Failure *JUnitFailure `xml:"failure,omitempty"` + //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr + SystemOut string `xml:"system-out,omitempty"` + //SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput + SystemErr string `xml:"system-err,omitempty"` +} + +type JUnitSkipped struct { + // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) + Message string `xml:"message,attr"` +} + +type JUnitError struct { + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" + Message string `xml:"message,attr"` + //Type is one of "panicked" or "interrupted" + Type string `xml:"type,attr"` + //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines + Description string `xml:",chardata"` +} + +type JUnitFailure struct { + //Message maps onto the failure message - equivalent to SpecReport.Failure.Message + Message string `xml:"message,attr"` + //Type is "failed" + Type string `xml:"type,attr"` + //Description maps onto the location and stack trace of the failure + Description string `xml:",chardata"` +} + +func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { + suite := JUnitTestSuite{ + Name: report.SuiteDescription, + Package: report.SuitePath, + Time: report.RunTime.Seconds(), + Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), + Properties: JUnitProperties{ + Properties: []JUnitProperty{ + {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, + {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, + 
{"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, + {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, + {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, + {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, + {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, + {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, + {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, + {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, + {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, + {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, + {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, + {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, + }, + }, + } + for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 && !config.OmitSpecLabels { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + name = strings.TrimSpace(name) + + test := JUnitTestCase{ + Name: name, + Classname: report.SuiteDescription, + Status: spec.State.String(), + Time: spec.RunTime.Seconds(), + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) + } + suite.Tests += 1 + + switch spec.State { + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + test.Skipped = &JUnitSkipped{Message: message} + suite.Skipped += 1 + case types.SpecStatePending: + test.Skipped = &JUnitSkipped{Message: "pending"} + suite.Disabled += 1 + case types.SpecStateFailed: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "failed", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateInterrupted: + test.Error = &JUnitError{ + Message: spec.Failure.Message, + Type: "interrupted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + case types.SpecStateAborted: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "aborted", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Errors += 1 + case types.SpecStatePanicked: + test.Error = &JUnitError{ + Message: spec.Failure.ForwardedPanic, + Type: "panicked", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if 
config.OmitFailureMessageAttr { + test.Error.Message = "" + } + suite.Errors += 1 + } + + suite.TestCases = append(suite.TestCases, test) + } + + junitReport := JUnitTestSuites{ + Tests: suite.Tests, + Disabled: suite.Disabled + suite.Skipped, + Errors: suite.Errors, + Failures: suite.Failures, + Time: suite.Time, + TestSuites: []JUnitTestSuite{suite}, + } + + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } + f, err := os.Create(dst) + if err != nil { + return err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(junitReport) + + return f.Close() +} + +func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { + messages := []string{} + mergedReport := JUnitTestSuites{} + for _, source := range sources { + report := JUnitTestSuites{} + f, err := os.Open(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + err = xml.NewDecoder(f).Decode(&report) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + + mergedReport.Tests += report.Tests + mergedReport.Disabled += report.Disabled + mergedReport.Errors += report.Errors + mergedReport.Failures += report.Failures + mergedReport.Time += report.Time + mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) + } + + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } + f, err := os.Create(dst) + if err != nil { + return messages, err + } + f.WriteString(xml.Header) + encoder := xml.NewEncoder(f) + encoder.Indent(" ", " ") + encoder.Encode(mergedReport) + + return messages, f.Close() +} + +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure. 
These are visible in the timeline\n") + } + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + return RenderTimeline(spec, true) +} + +func RenderTimeline(spec types.SpecReport, noColor bool) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + return spec.CapturedStdOutErr +} + +// Deprecated JUnitReporter (so folks can still compile their suites) +type JUnitReporter struct{} + +func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } +func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} +func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} +func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} +func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go new file mode 100644 index 00000000..5e726c46 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -0,0 +1,29 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type Reporter interface { + SuiteWillBegin(report types.Report) + WillRun(report types.SpecReport) + DidRun(report types.SpecReport) + SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) + EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) +} + +type NoopReporter struct{} + +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go new file mode 100644 index 00000000..e990ad82 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -0,0 +1,105 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +func tcEscape(s string) string { + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") + return s +} + +func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } + f, err := 
os.Create(dst) + if err != nil { + return err + } + + name := report.SuiteDescription + labels := report.SuiteLabels + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) + for _, spec := range report.SpecReports { + name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if spec.FullText() != "" { + name = name + " " + spec.FullText() + } + labels := spec.Labels() + if len(labels) > 0 { + name = name + " [" + strings.Join(labels, ", ") + "]" + } + + name = tcEscape(name) + fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) + switch spec.State { + case types.SpecStatePending: + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) + case types.SpecStateSkipped: + message := "skipped" + if spec.Failure.Message != "" { + message += " - " + spec.Failure.Message + } + fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) + case types.SpecStateFailed: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStatePanicked: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateInterrupted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + case types.SpecStateAborted: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) + } + + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) + } + fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) + + return f.Close() +} + +func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { + messages := []string{} + merged := []byte{} + for _, source := range sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + os.Remove(source) + merged = append(merged, data...) 
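// For illustration only (an assumed example, not part of the vendored file): because merging
// is raw byte concatenation, the merged file simply lists each suite's service messages in order, e.g.
//
//	##teamcity[testSuiteStarted name='Suite A']
//	...
//	##teamcity[testSuiteFinished name='Suite A']
//	##teamcity[testSuiteStarted name='Suite B']
//	...
//	##teamcity[testSuiteFinished name='Suite B']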
+ } + return messages, os.WriteFile(dst, merged, 0666) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go new file mode 100644 index 00000000..57e87517 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -0,0 +1,159 @@ +package types + +import ( + "fmt" + "os" + "regexp" + "runtime" + "runtime/debug" + "strings" + "sync" +) + +type CodeLocation struct { + FileName string `json:",omitempty"` + LineNumber int `json:",omitempty"` + FullStackTrace string `json:",omitempty"` + CustomMessage string `json:",omitempty"` +} + +func (codeLocation CodeLocation) String() string { + if codeLocation.CustomMessage != "" { + return codeLocation.CustomMessage + } + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} + +func (codeLocation CodeLocation) ContentsOfLine() string { + if codeLocation.CustomMessage != "" { + return "" + } + contents, err := os.ReadFile(codeLocation.FileName) + if err != nil { + return "" + } + lines := strings.Split(string(contents), "\n") + if len(lines) < codeLocation.LineNumber { + return "" + } + return lines[codeLocation.LineNumber-1] +} + +type codeLocationLocator struct { + pcs map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if !c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var clLocator = &codeLocationLocator{ + pcs: map[uintptr]bool{}, + helpers: map[string]bool{}, + lock: &sync.Mutex{}, +} + +// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. +func MarkAsHelper(optionalSkip ...int) { + skip := 1 + if len(optionalSkip) > 0 { + skip += optionalSkip[0] + } + pc, _, _, ok := runtime.Caller(skip) + if ok { + clLocator.addHelper(pc) + } +} + +func NewCustomCodeLocation(message string) CodeLocation { + return CodeLocation{ + CustomMessage: message, + } +} + +func NewCodeLocation(skip int) CodeLocation { + return clLocator.getCodeLocation(skip + 1) +} + +func NewCodeLocationWithStackTrace(skip int) CodeLocation { + cl := clLocator.getCodeLocation(skip + 1) + cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) + return cl +} + +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. 
Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { + prunedStack = stack + } else { + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + // We filter out based on the source code file name. + if !re.MatchString(stack[i*2+1]) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go new file mode 100644 index 00000000..c88fc85a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -0,0 +1,758 @@ +/* +Ginkgo accepts a number of configuration options. +These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) +*/ + +package types + +import ( + "flag" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" +) + +// Configuration controlling how an individual test suite is run +type SuiteConfig struct { + RandomSeed int64 + RandomizeAllSpecs bool + FocusStrings []string + SkipStrings []string + FocusFiles []string + SkipFiles []string + LabelFilter string + FailOnPending bool + FailFast bool + FlakeAttempts int + MustPassRepeatedly int + DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration + Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issues for some users that were setting it manually + OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration + + ParallelProcess int + ParallelTotal int + ParallelHost string +} + +func NewDefaultSuiteConfig() SuiteConfig { + return SuiteConfig{ + RandomSeed: time.Now().Unix(), + Timeout: time.Hour, + ParallelProcess: 1, + ParallelTotal: 1, + GracePeriod: 30 * time.Second, + } +} + +type VerbosityLevel uint + +const ( + VerbosityLevelSuccinct VerbosityLevel = iota + VerbosityLevelNormal + VerbosityLevelVerbose + VerbosityLevelVeryVerbose +) + +func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { + return vl > comp +} + +func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { + return vl >= comp +} + +func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { + return vl == comp +} + +func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { + return vl <= comp +} + +func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { + return vl < comp +} + +// Configuration for Ginkgo's reporter +type ReporterConfig struct { + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool + + JSONReport string + JUnitReport string + TeamcityReport string +} + +func (rc ReporterConfig) Verbosity() VerbosityLevel { + if rc.Succinct { + return VerbosityLevelSuccinct + } else if rc.Verbose { + return VerbosityLevelVerbose + } else if rc.VeryVerbose { + return 
VerbosityLevelVeryVerbose + } + return VerbosityLevelNormal +} + +func (rc ReporterConfig) WillGenerateReport() bool { + return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" +} + +func NewDefaultReporterConfig() ReporterConfig { + return ReporterConfig{} +} + +// Configuration for the Ginkgo CLI +type CLIConfig struct { + //for build, run, and watch + Recurse bool + SkipPackage string + RequireSuite bool + NumCompilers int + + //for run and watch only + Procs int + Parallel bool + AfterRunHook string + OutputDir string + KeepSeparateCoverprofiles bool + KeepSeparateReports bool + + //for run only + KeepGoing bool + UntilItFails bool + Repeat int + RandomizeSuites bool + + //for watch only + Depth int + WatchRegExp string +} + +func NewDefaultCLIConfig() CLIConfig { + return CLIConfig{ + Depth: 1, + WatchRegExp: `\.go$`, + } +} + +func (g CLIConfig) ComputedProcs() int { + if g.Procs > 0 { + return g.Procs + } + + n := 1 + if g.Parallel { + n = runtime.NumCPU() + if n > 4 { + n = n - 1 + } + } + return n +} + +func (g CLIConfig) ComputedNumCompilers() int { + if g.NumCompilers > 0 { + return g.NumCompilers + } + + return runtime.NumCPU() +} + +// Configuration for the Ginkgo CLI capturing available go flags +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). +// More details can be found at: +// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ +type GoFlagsConfig struct { + //build-time flags for code-and-performance analysis + Race bool + Cover bool + CoverMode string + CoverPkg string + Vet string + + //run-time flags for code-and-performance analysis + BlockProfile string + BlockProfileRate int + CoverProfile string + CPUProfile string + MemProfile string + MemProfileRate int + MutexProfile string + MutexProfileFraction int + Trace string + + //build-time flags for building + A bool + ASMFlags string + BuildMode string + Compiler string + GCCGoFlags string + GCFlags string + InstallSuffix string + LDFlags string + LinkShared bool + Mod string + N bool + ModFile string + ModCacheRW bool + MSan bool + PkgDir string + Tags string + TrimPath bool + ToolExec string + Work bool + X bool +} + +func NewDefaultGoFlagsConfig() GoFlagsConfig { + return GoFlagsConfig{} +} + +func (g GoFlagsConfig) BinaryMustBePreserved() bool { + return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" +} + +// Configuration options that were deprecated in 2.0 +type deprecatedConfig struct { + DebugParallel bool + NoisySkippings bool + NoisyPendings bool + RegexScansFilePath bool + SlowSpecThresholdWithFLoatUnits float64 + Stream bool + Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool +} + +// Flags + +// Flags sections used by both the CLI and the Ginkgo test process +var FlagSections = GinkgoFlagSections{ + {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, + {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, + {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, + {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", + Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, + {Key: "filter", Style: "{{cyan}}", Heading: 
"Filtering Tests"}, + {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, + {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, + {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, + {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, + {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", + Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, + {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, + {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, + {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, + Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, +} + +// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI +var SuiteConfigFlags = GinkgoFlags{ + {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", + Usage: "The seed used to randomize the spec suite."}, + {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, + + {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, + {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, + {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + + {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. 
You can pass multiple --source-root flags."}, + {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", + Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, + {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", + Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, + + {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", + Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."}, + {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", + Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."}, + + {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, +} + +// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) +var ParallelConfigFlags = GinkgoFlags{ + {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, + {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", + Usage: "The total number of worker processes. 
For running specs in parallel."}, + {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", + Usage: "The address for the server that will synchronize the processes."}, +} + +// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI +var ReporterConfigFlags = GinkgoFlags{ + {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, suppress color output in default reporter."}, + {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", + Usage: "If set, emits more output including GinkgoWriter contents."}, + {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", + Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, + {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", + Usage: "If set, default reporter prints out a very succinct report"}, + {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", + Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + + {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", + Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, + {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, + + {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold", + Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, + {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. 
You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, +} + +// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process +func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) + flags = flags.WithPrefix("ginkgo") + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "D": &deprecatedConfig{}, + } + extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} + + return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) +} + +// VetConfig validates that the Ginkgo test process' configuration is sound +func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { + errors := []error{} + + if flagSet.WasSet("count") || flagSet.WasSet("test.count") { + flag := flagSet.Lookup("count") + if flag == nil { + flag = flagSet.Lookup("test.count") + } + count, err := strconv.Atoi(flag.Value.String()) + if err != nil || count != 1 { + errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) + } + } + + if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { + errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) + } + + if suiteConfig.ParallelTotal < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) + } + + if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { + errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) + } + + if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { + errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) + } + + if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { + errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) + } + + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + + if len(suiteConfig.FocusFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.FocusFiles) + if err != nil { + errors = append(errors, err) + } + } + + if len(suiteConfig.SkipFiles) > 0 { + _, err := ParseFileFilters(suiteConfig.SkipFiles) + if err != nil { + errors = append(errors, err) + } + } + + if suiteConfig.LabelFilter != "" { + _, err := ParseLabelFilter(suiteConfig.LabelFilter) + if err != nil { + errors = append(errors, err) + } + } + + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { + case "", "dup", "swap", "none": + default: + errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) + } + + numVerbosity := 0 + for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { + if v { + numVerbosity++ + } + } + if numVerbosity > 1 { + errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) + } + + return errors +} + +// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands +var GinkgoCLISharedFlags = GinkgoFlags{ + 
{KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", + Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."}, + {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "comma-separated list of packages", + Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."}, + {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, + {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", + Usage: "When running multiple packages, the number of concurrent compilations to perform."}, +} + +// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) +var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ + {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "The number of parallel test nodes to run."}, + {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", + Usage: "--nodes is an alias for --procs"}, + {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", + Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, + {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", + Usage: "Command to run when a test suite completes."}, + {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", + Usage: "A location to place all generated profiles and reports."}, + {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", + Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."}, + {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", + Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. 
The reports will remain in their respective package directories or in -output-dir if set."}, + + {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands +var GinkgoCLIRunFlags = GinkgoFlags{ + {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, + {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, + {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", + Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."}, + {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", + Usage: "If set, ginkgo will randomize the order in which test suites run."}, +} + +// GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands +var GinkgoCLIWatchFlags = GinkgoFlags{ + {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", + Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, + {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", + UsageArgument: "Regular Expression", + UsageDefaultValue: `\.go$`, + Usage: "Only files matching this regular expression will be watched for changes."}, +} + +// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI +var GoBuildFlags = GinkgoFlags{ + {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", + Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, + {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", + Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, + {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", + Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, + {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", + Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, + + {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", + Usage: "force rebuilding of packages that are already up-to-date."}, + {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool asm invocation."}, + {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", + Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", + Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, + {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each gccgo compiler/linker invocation."}, + {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool compile invocation."}, + {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", + Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to race or, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, + {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", + Usage: "arguments to pass on each go tool link invocation."}, + {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", + Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, + {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", + Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."}, + {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", + Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, + {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", + Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed.
When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, + {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", + Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, + {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", + Usage: "print the commands but do not run them."}, + {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", + Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, + {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", + Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, + {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", + Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, + {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", + Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run 'cmd args /path/to/asm <arguments for asm>'."}, + {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", + Usage: "print the name of the temporary work directory and do not delete it when exiting."}, + {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", + Usage: "print the commands."}, +} + +// GoRunFlags provides flags for the Ginkgo CLI run and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI +var GoRunFlags = GinkgoFlags{ + {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", + Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, + {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`}, + {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a CPU profile to the specified file before exiting.
Preserves test binary.`}, + {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`}, + {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", + Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`}, + {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis", + Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`}, + {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis", + Usage: `if >= 0, calls runtime.SetMutexProfileFraction(). Sample 1 in n stack traces of goroutines holding a contended mutex.`}, + {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis", + Usage: `Write an execution trace to the specified file before exiting.`}, +} + +// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound. +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers +func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { + errors := []error{} + + if cliConfig.Repeat > 0 && cliConfig.UntilItFails { + errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails()) + } + + //initialize the output directory + if cliConfig.OutputDir != "" { + err := os.MkdirAll(cliConfig.OutputDir, 0777) + if err != nil { + errors = append(errors, err) + } + } + + //ensure cover mode is configured appropriately + if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" { + goFlagsConfig.CoverProfile = "coverprofile.out" + } + + return cliConfig, goFlagsConfig, errors +} + +// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { + // if the user has set the CoverProfile run-time flag, set the build-time cover flag as well so that + // the built test binary can generate a coverprofile + if goFlagsConfig.CoverProfile != "" { + goFlagsConfig.Cover = true + } + + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + + args := []string{"test", "-c", "-o", destination, packageToBuild} + goArgs, err := GenerateFlagArgs(
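// Render the build-time flag set against the goFlagsConfig mutated above so
// that the forced Cover flag and any rerooted coverpkg patterns are reflected
// in the generated `go test -c` argument list.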
GoBuildFlags, + map[string]interface{}{ + "Go": &goFlagsConfig, + }, + ) + + if err != nil { + return []string{}, err + } + args = append(args, goArgs...) + return args, nil +} + +// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary +func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) { + var flags GinkgoFlags + flags = SuiteConfigFlags.WithPrefix("ginkgo") + flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...) + flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...) + bindings := map[string]interface{}{ + "S": &suiteConfig, + "R": &reporterConfig, + "Go": &goFlagsConfig, + } + + return GenerateFlagArgs(flags, bindings) +} + +// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary +func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) { + flags := GoRunFlags.WithPrefix("test") + bindings := map[string]interface{}{ + "Go": &goFlagsConfig, + } + + args, err := GenerateFlagArgs(flags, bindings) + if err != nil { + return args, err + } + args = append(args, "--test.v") + return args, nil +} + +// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command +func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIRunFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command +func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := SuiteConfigFlags + flags = flags.CopyAppend(ReporterConfigFlags...) + flags = flags.CopyAppend(GinkgoCLISharedFlags...) + flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...) + flags = flags.CopyAppend(GinkgoCLIWatchFlags...) + flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoRunFlags...) + + bindings := map[string]interface{}{ + "S": suiteConfig, + "R": reporterConfig, + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + return NewGinkgoFlagSet(flags, bindings, FlagSections) +} + +// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command +func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags + flags = flags.CopyAppend(GoBuildFlags...) 
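// Unlike the run and watch flag sets assembled above, the build command only
// compiles suites: SuiteConfigFlags, ReporterConfigFlags, and GoRunFlags are
// deliberately omitted here, leaving just the shared CLI flags and go's
// build-time flags.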
+ + bindings := map[string]interface{}{ + "C": cliConfig, + "Go": goFlagsConfig, + "D": &deprecatedConfig{}, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Building Multiple Suites" + } + if flagSections[i].Key == "go-build" { + flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags", + Description: "These flags are inherited from go build."} + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} + +func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) { + flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package") + + bindings := map[string]interface{}{ + "C": cliConfig, + } + + flagSections := make(GinkgoFlagSections, len(FlagSections)) + copy(flagSections, FlagSections) + for i := range flagSections { + if flagSections[i].Key == "multiple-suites" { + flagSections[i].Heading = "Fetching Labels from Multiple Suites" + } + } + + return NewGinkgoFlagSet(flags, bindings, flagSections) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go new file mode 100644 index 00000000..17922304 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "time" +) + +/* + A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. +*/ + +type SuiteSummary = DeprecatedSuiteSummary +type SetupSummary = DeprecatedSetupSummary +type SpecSummary = DeprecatedSpecSummary +type SpecMeasurement = DeprecatedSpecMeasurement +type SpecComponentType = NodeType +type SpecFailure = DeprecatedSpecFailure + +var ( + SpecComponentTypeInvalid = NodeTypeInvalid + SpecComponentTypeContainer = NodeTypeContainer + SpecComponentTypeIt = NodeTypeIt + SpecComponentTypeBeforeEach = NodeTypeBeforeEach + SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach + SpecComponentTypeAfterEach = NodeTypeAfterEach + SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach + SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite + SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite + SpecComponentTypeAfterSuite = NodeTypeAfterSuite + SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite +) + +type DeprecatedSuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type DeprecatedSetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type DeprecatedSpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*DeprecatedSpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s DeprecatedSpecSummary) HasFailureState() bool { + return s.State.Is(SpecStateFailureStates) +} + +func (s DeprecatedSpecSummary) TimedOut() bool { + return false +} + +func (s 
DeprecatedSpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s DeprecatedSpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s DeprecatedSpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s DeprecatedSpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s DeprecatedSpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type DeprecatedSpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type DeprecatedSpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s DeprecatedSpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go new file mode 100644 index 00000000..e2519f67 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -0,0 +1,177 @@ +package types + +import ( + "os" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type Deprecation struct { + Message string + DocLink string + Version string +} + +type deprecations struct{} + +var Deprecations = deprecations{} + +func (d deprecations) CustomReporter() Deprecation { + return Deprecation{ + Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", + DocLink: "removed-custom-reporters", + Version: "1.16.0", + } +} + +func (d deprecations) Async() Deprecation { + return Deprecation{ + Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", + DocLink: "removed-async-testing", + Version: "1.16.0", + } +} + +func (d deprecations) Measure() Deprecation { + return Deprecation{ + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", + DocLink: "removed-measure", + Version: "1.16.3", + } +} + +func (d deprecations) ParallelNode() Deprecation { + return Deprecation{ + Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", + DocLink: "renamed-ginkgoparallelnode", + Version: "1.16.4", + } +} + +func (d deprecations) CurrentGinkgoTestDescription() Deprecation { + return Deprecation{ + Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", + DocLink: "changed-currentginkgotestdescription", + Version: "1.16.0", + } +} + +func (d deprecations) Convert() Deprecation { + return Deprecation{ + Message: "The convert command is deprecated in Ginkgo V2", + DocLink: "removed-ginkgo-convert", + Version: "1.16.0", + } +} + +func (d deprecations) Blur() Deprecation { + return Deprecation{ + Message: "The blur command is deprecated in Ginkgo V2. 
Use 'ginkgo unfocus' instead.", + Version: "1.16.0", + } +} + +func (d deprecations) Nodot() Deprecation { + return Deprecation{ + Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to reference objects and types provided by Ginkgo and Gomega.", + DocLink: "removed-ginkgo-nodot", + Version: "1.16.0", + } +} + +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information mean that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + +type DeprecationTracker struct { + deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex +} + +func NewDeprecationTracker() *DeprecationTracker { + return &DeprecationTracker{ + deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, + } +} + +func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { + ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") + if deprecation.Version != "" && ackVersion != "" { + ack := ParseSemVer(ackVersion) + version := ParseSemVer(deprecation.Version) + if ack.GreaterThanOrEqualTo(version) { + return + } + } + + d.lock.Lock() + defer d.lock.Unlock() + if len(cl) == 1 { + d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) + } else { + d.deprecations[deprecation] = []CodeLocation{} + } +} + +func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() + return len(d.deprecations) > 0 +} + +func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() + out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") + out += formatter.F("{{light-yellow}}============================================={{/}}\n") + for deprecation, locations := range d.deprecations { + out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") + if deprecation.DocLink != "" { + out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) + } + for _, location := range locations { + out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) + } + } + out += formatter.F("\n{{gray}}To silence deprecations that can be silenced, set the following environment variable:{{/}}\n") + out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) + return out +} + +type SemVer struct { + Major int + Minor int + Patch int +} + +func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { + return (s.Major > o.Major) || + (s.Major == o.Major && s.Minor > o.Minor) || + (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) +} + +func ParseSemVer(semver string) SemVer { + out := SemVer{} + semver = strings.TrimFunc(semver, func(r rune) bool { + return !(unicode.IsNumber(r) || r == '.') + }) + components := strings.Split(semver, ".") + if len(components) > 0 { + out.Major, _ = strconv.Atoi(components[0]) + } + if len(components) > 1 { + out.Minor, _ = strconv.Atoi(components[1]) + } + if len(components) > 2 { + out.Patch, _ = strconv.Atoi(components[2]) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go new file mode 100644 index 00000000..1d96ae02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go @@ -0,0 +1,43 @@ +package types + +import 
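// encoding/json is this file's only dependency: the EnumSupport type below
// translates enum values to and from their string names when (un)marshalling
// JSON reports.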
"encoding/json" + +type EnumSupport struct { + toString map[uint]string + toEnum map[string]uint + maxEnum uint +} + +func NewEnumSupport(toString map[uint]string) EnumSupport { + toEnum, maxEnum := map[string]uint{}, uint(0) + for k, v := range toString { + toEnum[v] = k + if maxEnum < k { + maxEnum = k + } + } + return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} +} + +func (es EnumSupport) String(e uint) string { + if e > es.maxEnum { + return es.toString[0] + } + return es.toString[e] +} + +func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { + var dec string + if err := json.Unmarshal(b, &dec); err != nil { + return 0, err + } + out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway + return out, nil +} + +func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { + if e == 0 || e > es.maxEnum { + return json.Marshal(nil) + } + return json.Marshal(es.toString[e]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go new file mode 100644 index 00000000..4fbdc3e9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -0,0 +1,630 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoError struct { + Heading string + Message string + DocLink string + CodeLocation CodeLocation +} + +func (g GinkgoError) Error() string { + out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) + if (g.CodeLocation != CodeLocation{}) { + contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") + if contentsOfLine != "" { + out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) + } + out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) + } + if g.Message != "" { + out += formatter.Fiw(1, formatter.COLS, g.Message) + out += "\n\n" + } + if g.DocLink != "" { + out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) + } + + return out +} + +type ginkgoErrors struct{} + +var GinkgoErrors = ginkgoErrors{} + +func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { + return GinkgoError{ + Heading: "Your Test Panicked", + Message: `When you, or your assertion library, calls Ginkgo's Fail(), +Ginkgo panics to prevent subsequent assertions from running. + +Normally Ginkgo rescues this panic so you shouldn't see it. + +However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. + +Alternatively, you may have made an assertion outside of a Ginkgo +leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to +an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, + DocLink: "mental-model-how-ginkgo-handles-failure", + CodeLocation: cl, + } +} + +func (g ginkgoErrors) RerunningSuite() error { + return GinkgoError{ + Heading: "Rerunning Suite", + Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. 
If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), + DocLink: "repeating-spec-runs-and-managing-flaky-specs", + } +} + +/* Tree construction errors */ + +func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node +to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. + +To enable randomization and parallelization Ginkgo requires the spec tree +to be fully constructed up front. In practice, this means that you can +only create nodes like {{bold}}[%s]{{/}} at the top-level or within the +body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", + } +} + +func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { + return GinkgoError{ + Heading: "Assertion or Panic detected during tree construction", + Message: formatter.F( + `Ginkgo detected a panic while constructing the spec tree. +You may be trying to make an assertion in the body of a container node +(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). + +Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, +{{bold}}It{{/}}, etc. + +{{bold}}Here's the content of the panic that was caught:{{/}} +%v`, caughtPanic), + CodeLocation: cl, + DocLink: "no-assertions-in-container-nodes", + } +} + +func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. + +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { + docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" + } + + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. 
+ +{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), + CodeLocation: cl, + DocLink: docLink, + } +} + +func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) +} + +func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F( + `It looks like you are trying to add a {{bold}}[%s]{{/}} node but +you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. + +Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), + CodeLocation: cl, + DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", + } +} + +/* Decorator errors */ +func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { + return GinkgoError{ + Heading: "Invalid Decorator", + Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: Focused and Pending", + Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { + return GinkgoError{ + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. 
You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed `+mustGet+`. +You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Multiple Functions", + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Missing Functions", + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}.
You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +/* Ordered Container errors */ +func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Serial Node in Non-Serial Ordered Container", + Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Setup Node not in Ordered Container", + Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { + return GinkgoError{ + Heading: "ContinueOnFailure not decorating an outermost Ordered Container", + Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + +/* DeferCleanup errors */ +func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup requires a valid function", + Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup must be called inside a setup or subject node", + Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). 
You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "DeferCleanup cannot be called in a DeferCleanup callback", + Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", + CodeLocation: cl, + DocLink: "cleaning-up-our-cleanup-code-defercleanup", + } +} + +/* ReportEntry errors */ +func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { + return GinkgoError{ + Heading: "Too Many ReportEntry Values", + Message: formatter.F(`{{bold}}AddReportEntry{{/}} can only be given one value. Got unexpected value: %#v`, arg), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}AddReportEntry{{/}} outside of a running spec. Make sure you call {{bold}}AddReportEntry{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "attaching-data-to-reports", + } +} + +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + +/* FileFilter and SkipFilter errors */ +func (g ginkgoErrors) InvalidFileFilter(filter string) error { + return GinkgoError{ + Heading: "Invalid File Filter", + Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter), + DocLink: "filtering-specs", + } +} + +func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { + return GinkgoError{ + Heading: "Invalid File Filter Regular Expression", + Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression.
regexp.Compile error: %s`, filter, err), + DocLink: "filtering-specs", + } +} + +/* Label Errors */ +func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { + var message string + if location >= 0 { + for i, r := range input { + if i == location { + message += "{{red}}{{bold}}{{underline}}" + } + message += string(r) + if i == location { + message += "{{/}}" + } + } + } else { + message = input + } + message += "\n" + error + return GinkgoError{ + Heading: "Syntax Error Parsing Label Filter", + Message: message, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Label", + Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain any of the following characters: '&|!,()/'", label), + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty Label", + Message: "Labels cannot be empty", + CodeLocation: cl, + DocLink: "spec-labels", + } +} + +/* Table errors */ +func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed multiple functions", + Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Entry description", + Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: "No parameters have been passed to the Table Function", + Message: "The Table Function expected at least 1 parameter", + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), + Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameter type passed to %s", kind), + Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("Incorrect parameter type passed to %s", kind), + Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +/* Parallel Synchronization errors */ + +func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { + return GinkgoError{ + Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", + Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { + return GinkgoError{ + Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", + Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.", + } +} + +func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { + return GinkgoError{ + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", + Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", + } +} + +/* Configuration errors */ + +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { + return GinkgoError{ + Heading: "Unknown Type passed to RunSpecs", + Message: fmt.Sprintf("RunSpecs() accepts labels and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), + } +} + +var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." + +func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.total must be >= 1", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) MissingParallelHostConfiguration() error { + return GinkgoError{ + Heading: "-ginkgo.parallel.host is missing", + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) UnreachableParallelHost(host string) error { + return GinkgoError{ + Heading: "Could not reach ginkgo.parallel.host:" + host, + Message: sharedParallelErrorMessage, + DocLink: "spec-parallelization", + } +} + +func (g ginkgoErrors) DryRunInParallelConfiguration() error { + return GinkgoError{ + Heading: "Ginkgo only performs -dryRun in serial mode.", + Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", + } +} + +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration.
The default is 30s.", + } +} + +func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { + return GinkgoError{ + Heading: "Conflicting reporter verbosity settings.", + Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!", + } +} + +func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { + return GinkgoError{ + Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), + Message: "You must choose one of 'dup', 'swap', or 'none'.", + } +} + +func (g ginkgoErrors) InvalidGoFlagCount() error { + return GinkgoError{ + Heading: "Use of go test -count", + Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", + } +} + +func (g ginkgoErrors) InvalidGoFlagParallel() error { + return GinkgoError{ + Heading: "Use of go test -parallel", + Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", + } +} + +func (g ginkgoErrors) BothRepeatAndUntilItFails() error { + return GinkgoError{ + Heading: "--repeat and --until-it-fails are both set", + Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?", + } +} + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go new file mode 100644 index 00000000..cc21df71 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go @@ -0,0 +1,106 @@ +package types + +import ( + "regexp" + "strconv" + "strings" +) + +func ParseFileFilters(filters []string) (FileFilters, error) { + ffs := FileFilters{} + for _, filter := range filters { + ff := FileFilter{} + if filter == "" { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + components := strings.Split(filter, ":") + if !(len(components) == 1 || len(components) == 2) { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + + var err error + ff.Filename, err = regexp.Compile(components[0]) + if err != nil { + return nil, err + } + if len(components) == 2 { + lineFilters := strings.Split(components[1], ",") + for _, lineFilter := range lineFilters { + components := strings.Split(lineFilter, "-") + if len(components) == 1 { + line, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) + } else if len(components) == 2 { + line1, err := strconv.Atoi(strings.TrimSpace(components[0])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + line2, err := strconv.Atoi(strings.TrimSpace(components[1])) + if err != nil { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) + } else { + return nil, GinkgoErrors.InvalidFileFilter(filter) + } + } + } + ffs = append(ffs, ff) + } + return ffs, nil +} + +type FileFilter struct { + Filename *regexp.Regexp + LineFilters LineFilters +} + +func (f FileFilter) Matches(locations 
[]CodeLocation) bool { + for _, location := range locations { + if f.Filename.MatchString(location.FileName) && + f.LineFilters.Matches(location.LineNumber) { + return true + } + + } + return false +} + +type FileFilters []FileFilter + +func (ffs FileFilters) Matches(locations []CodeLocation) bool { + for _, ff := range ffs { + if ff.Matches(locations) { + return true + } + } + + return false +} + +type LineFilter struct { + Min int + Max int +} + +func (lf LineFilter) Matches(line int) bool { + return lf.Min <= line && line < lf.Max +} + +type LineFilters []LineFilter + +func (lfs LineFilters) Matches(line int) bool { + if len(lfs) == 0 { + return true + } + + for _, lf := range lfs { + if lf.Matches(line) { + return true + } + } + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go new file mode 100644 index 00000000..9186ae87 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -0,0 +1,489 @@ +package types + +import ( + "flag" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" +) + +type GinkgoFlag struct { + Name string + KeyPath string + SectionKey string + + Usage string + UsageArgument string + UsageDefaultValue string + + DeprecatedName string + DeprecatedDocLink string + DeprecatedVersion string + + ExportAs string +} + +type GinkgoFlags []GinkgoFlag + +func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags { + out := GinkgoFlags{} + out = append(out, f...) + out = append(out, flags...) + return out +} + +func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags { + if prefix == "" { + return f + } + out := GinkgoFlags{} + for _, flag := range f { + if flag.Name != "" { + flag.Name = prefix + "." + flag.Name + } + if flag.DeprecatedName != "" { + flag.DeprecatedName = prefix + "." + flag.DeprecatedName + } + if flag.ExportAs != "" { + flag.ExportAs = prefix + "." 
+ flag.ExportAs + } + out = append(out, flag) + } + return out +} + +func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags { + out := GinkgoFlags{} + for _, flag := range f { + for _, name := range names { + if flag.Name == name { + out = append(out, flag) + break + } + } + } + return out +} + +type GinkgoFlagSection struct { + Key string + Style string + Succinct bool + Heading string + Description string +} + +type GinkgoFlagSections []GinkgoFlagSection + +func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) { + for _, section := range gfs { + if section.Key == key { + return section, true + } + } + + return GinkgoFlagSection{}, false +} + +type GinkgoFlagSet struct { + flags GinkgoFlags + bindings interface{} + + sections GinkgoFlagSections + extraGoFlagsSection GinkgoFlagSection + + flagSet *flag.FlagSet +} + +// Call NewGinkgoFlagSet to create a GinkgoFlagSet that creates and binds to its own *flag.FlagSet +func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + }, nil) +} + +// Call NewAttachedGinkgoFlagSet to create a GinkgoFlagSet that extends an existing *flag.FlagSet +func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) { + return bindFlagSet(GinkgoFlagSet{ + flags: flags, + bindings: bindings, + sections: sections, + extraGoFlagsSection: extraGoFlagsSection, + }, flagSet) +} + +func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { + if flagSet == nil { + f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + //suppress all output as Ginkgo is responsible for formatting usage + f.flagSet.SetOutput(io.Discard) + } else { + f.flagSet = flagSet + //we're piggybacking on an existing flagset (typically go test) so we have limited control + //over user feedback + f.flagSet.Usage = f.substituteUsage + } + + for _, flag := range f.flags { + name := flag.Name + + deprecatedUsage := "[DEPRECATED]" + deprecatedName := flag.DeprecatedName + if name != "" { + deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name) + } else if flag.Usage != "" { + deprecatedUsage += " " + flag.Usage + } + + value, ok := valueAtKeyPath(f.bindings, flag.KeyPath) + if !ok { + return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface, addr := value.Interface(), value.Addr().Interface() + + switch value.Type() { + case reflect.TypeOf(string("")): + if name != "" { + f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage) + } + case reflect.TypeOf(int64(0)): + if name != "" { + f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage) + } + case reflect.TypeOf(float64(0)): + if name != "" { + f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage) + } + case reflect.TypeOf(int(0)): + if name != "" { + f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.IntVar(addr.(*int), deprecatedName,
iface.(int), deprecatedUsage) + } + case reflect.TypeOf(bool(true)): + if name != "" { + f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage) + } + case reflect.TypeOf(time.Duration(0)): + if name != "" { + f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage) + } + if deprecatedName != "" { + f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage) + } + + case reflect.TypeOf([]string{}): + if name != "" { + f.flagSet.Var(stringSliceVar{value}, name, flag.Usage) + } + if deprecatedName != "" { + f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage) + } + default: + return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return f, nil +} + +func (f GinkgoFlagSet) IsZero() bool { + return f.flagSet == nil +} + +func (f GinkgoFlagSet) WasSet(name string) bool { + found := false + f.flagSet.Visit(func(f *flag.Flag) { + if f.Name == name { + found = true + } + }) + + return found +} + +func (f GinkgoFlagSet) Lookup(name string) *flag.Flag { + return f.flagSet.Lookup(name) +} + +func (f GinkgoFlagSet) Parse(args []string) ([]string, error) { + if f.IsZero() { + return args, nil + } + err := f.flagSet.Parse(args) + if err != nil { + return []string{}, err + } + return f.flagSet.Args(), nil +} + +func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) { + if f.IsZero() { + return + } + f.flagSet.Visit(func(flag *flag.Flag) { + for _, ginkgoFlag := range f.flags { + if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) { + message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName) + if ginkgoFlag.Name != "" { + message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name) + } else if ginkgoFlag.Usage != "" { + message += " " + ginkgoFlag.Usage + } + + deprecationTracker.TrackDeprecation(Deprecation{ + Message: message, + DocLink: ginkgoFlag.DeprecatedDocLink, + Version: ginkgoFlag.DeprecatedVersion, + }) + } + } + }) +} + +func (f GinkgoFlagSet) Usage() string { + if f.IsZero() { + return "" + } + groupedFlags := map[GinkgoFlagSection]GinkgoFlags{} + ungroupedFlags := GinkgoFlags{} + managedFlags := map[string]bool{} + extraGoFlags := []*flag.Flag{} + + for _, flag := range f.flags { + managedFlags[flag.Name] = true + managedFlags[flag.DeprecatedName] = true + + if flag.Name == "" { + continue + } + + section, ok := f.sections.Lookup(flag.SectionKey) + if ok { + groupedFlags[section] = append(groupedFlags[section], flag) + } else { + ungroupedFlags = append(ungroupedFlags, flag) + } + } + + f.flagSet.VisitAll(func(flag *flag.Flag) { + if !managedFlags[flag.Name] { + extraGoFlags = append(extraGoFlags, flag) + } + }) + + out := "" + for _, section := range f.sections { + flags := groupedFlags[section] + if len(flags) == 0 { + continue + } + out += f.usageForSection(section) + if section.Succinct { + succinctFlags := []string{} + for _, flag := range flags { + if flag.Name != "" { + succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name)) + } + } + out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n") + } else { + for _, flag := range flags { + out += f.usageForFlag(flag, section.Style) + } + } + out += "\n" + } + if len(ungroupedFlags) > 0 { + for _, flag := range ungroupedFlags { + out 
+= f.usageForFlag(flag, "") + } + out += "\n" + } + if len(extraGoFlags) > 0 { + out += f.usageForSection(f.extraGoFlagsSection) + for _, goFlag := range extraGoFlags { + out += f.usageForGoFlag(goFlag) + } + } + + return out +} + +func (f GinkgoFlagSet) substituteUsage() { + fmt.Fprintln(f.flagSet.Output(), f.Usage()) +} + +func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) { + if len(keyPath) == 0 { + return reflect.Value{}, false + } + + val := reflect.ValueOf(root) + components := strings.Split(keyPath, ".") + for _, component := range components { + val = reflect.Indirect(val) + switch val.Kind() { + case reflect.Map: + val = val.MapIndex(reflect.ValueOf(component)) + if val.Kind() == reflect.Interface { + val = reflect.ValueOf(val.Interface()) + } + case reflect.Struct: + val = val.FieldByName(component) + default: + return reflect.Value{}, false + } + if (val == reflect.Value{}) { + return reflect.Value{}, false + } + } + + return val, true +} + +func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string { + out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n") + if section.Description != "" { + out += formatter.Fiw(0, formatter.COLS, section.Description+"\n") + } + return out +} + +func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string { + argument := flag.UsageArgument + defValue := flag.UsageDefaultValue + if argument == "" { + value, _ := valueAtKeyPath(f.bindings, flag.KeyPath) + switch value.Type() { + case reflect.TypeOf(string("")): + argument = "string" + case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)): + argument = "int" + case reflect.TypeOf(time.Duration(0)): + argument = "duration" + case reflect.TypeOf(float64(0)): + argument = "float" + case reflect.TypeOf([]string{}): + argument = "string" + } + } + if argument != "" { + argument = "[" + argument + "] " + } + if defValue != "" { + defValue = fmt.Sprintf("(default: %s)", defValue) + } + hyphens := "--" + if len(flag.Name) == 1 { + hyphens = "-" + } + + out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue) + out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage) + return out +} + +func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string { + //Taken directly from the flag package + out := fmt.Sprintf(" -%s", goFlag.Name) + name, usage := flag.UnquoteUsage(goFlag) + if len(name) > 0 { + out += " " + name + } + if len(out) <= 4 { + out += "\t" + } else { + out += "\n \t" + } + out += strings.ReplaceAll(usage, "\n", "\n \t") + out += "\n" + return out +} + +type stringSliceVar struct { + slice reflect.Value +} + +func (ssv stringSliceVar) String() string { return "" } +func (ssv stringSliceVar) Set(s string) error { + ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s}))) + return nil +} + +//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
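+// +//For illustration only (editor's note, not upstream code): with a hypothetical binding struct whose +//field Procs int is bound via KeyPath "Procs" to a flag named "procs", a value of 4 yields "--procs=4". +//Zero values and empty strings are skipped, true bools emit a bare "--name", and []string fields emit +//one "--name=value" argument per element.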
+func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { + result := []string{} + for _, flag := range flags { + name := flag.ExportAs + if name == "" { + name = flag.Name + } + if name == "" { + continue + } + + value, ok := valueAtKeyPath(bindings, flag.KeyPath) + if !ok { + return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath) + } + + iface := value.Interface() + switch value.Type() { + case reflect.TypeOf(string("")): + if iface.(string) != "" { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + case reflect.TypeOf(int64(0)): + if iface.(int64) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(float64(0)): + if iface.(float64) != 0 { + result = append(result, fmt.Sprintf("--%s=%f", name, iface)) + } + case reflect.TypeOf(int(0)): + if iface.(int) != 0 { + result = append(result, fmt.Sprintf("--%s=%d", name, iface)) + } + case reflect.TypeOf(bool(true)): + if iface.(bool) { + result = append(result, fmt.Sprintf("--%s", name)) + } + case reflect.TypeOf(time.Duration(0)): + if iface.(time.Duration) != time.Duration(0) { + result = append(result, fmt.Sprintf("--%s=%s", name, iface)) + } + + case reflect.TypeOf([]string{}): + strings := iface.([]string) + for _, s := range strings { + result = append(result, fmt.Sprintf("--%s=%s", name, s)) + } + default: + return []string{}, fmt.Errorf("unsupported type %T", iface) + } + } + + return result, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go new file mode 100644 index 00000000..b0d3b651 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -0,0 +1,358 @@ +package types + +import ( + "fmt" + "regexp" + "strings" +) + +var DEBUG_LABEL_FILTER_PARSING = false + +type LabelFilter func([]string) bool + +func matchLabelAction(label string) LabelFilter { + expected := strings.ToLower(label) + return func(labels []string) bool { + for i := range labels { + if strings.ToLower(labels[i]) == expected { + return true + } + } + return false + } +} + +func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { + return func(labels []string) bool { + for i := range labels { + if regex.MatchString(labels[i]) { + return true + } + } + return false + } +} + +func notAction(filter LabelFilter) LabelFilter { + return func(labels []string) bool { return !filter(labels) } +} + +func andAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) && b(labels) } +} + +func orAction(a, b LabelFilter) LabelFilter { + return func(labels []string) bool { return a(labels) || b(labels) } +} + +type lfToken uint + +const ( + lfTokenInvalid lfToken = iota + + lfTokenRoot + lfTokenOpenGroup + lfTokenCloseGroup + lfTokenNot + lfTokenAnd + lfTokenOr + lfTokenRegexp + lfTokenLabel + lfTokenEOF +) + +func (l lfToken) Precedence() int { + switch l { + case lfTokenRoot, lfTokenOpenGroup: + return 0 + case lfTokenOr: + return 1 + case lfTokenAnd: + return 2 + case lfTokenNot: + return 3 + } + return -1 +} + +func (l lfToken) String() string { + switch l { + case lfTokenRoot: + return "ROOT" + case lfTokenOpenGroup: + return "(" + case lfTokenCloseGroup: + return ")" + case lfTokenNot: + return "!" 
+ case lfTokenAnd: + return "&&" + case lfTokenOr: + return "||" + case lfTokenRegexp: + return "/regexp/" + case lfTokenLabel: + return "label" + case lfTokenEOF: + return "EOF" + } + return "INVALID" +} + +type treeNode struct { + token lfToken + location int + value string + + parent *treeNode + leftNode *treeNode + rightNode *treeNode +} + +func (tn *treeNode) setRightNode(node *treeNode) { + tn.rightNode = node + node.parent = tn +} + +func (tn *treeNode) setLeftNode(node *treeNode) { + tn.leftNode = node + node.parent = tn +} + +func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { + if tn.token.Precedence() <= precedence { + return tn + } + return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) +} + +func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { + if tn.token == lfTokenOpenGroup { + return tn + } + if tn.parent == nil { + return nil + } + return tn.parent.firstUnmatchedOpenNode() +} + +func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { + switch tn.token { + case lfTokenOpenGroup: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") + case lfTokenLabel: + return matchLabelAction(tn.value), nil + case lfTokenRegexp: + re, err := regexp.Compile(tn.value) + if err != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) + } + return matchLabelRegexAction(re), nil + } + + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") + } + rightLF, err := tn.rightNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenRoot, lfTokenCloseGroup: + return rightLF, nil + case lfTokenNot: + return notAction(rightLF), nil + } + + if tn.leftNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) + } + leftLF, err := tn.leftNode.constructLabelFilter(input) + if err != nil { + return nil, err + } + + switch tn.token { + case lfTokenAnd: + return andAction(leftLF, rightLF), nil + case lfTokenOr: + return orAction(leftLF, rightLF), nil + } + + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) +} + +func (tn *treeNode) tokenString() string { + out := fmt.Sprintf("<%s", tn.token) + if tn.value != "" { + out += " | " + tn.value + } + out += ">" + return out +} + +func (tn *treeNode) toString(indent int) string { + out := tn.tokenString() + "\n" + if tn.leftNode != nil { + out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) + } + if tn.rightNode != nil { + out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) + } + return out +} + +func tokenize(input string) func() (*treeNode, error) { + runes, i := []rune(input), 0 + + peekIs := func(r rune) bool { + if i+1 < len(runes) { + return runes[i+1] == r + } + return false + } + + consumeUntil := func(cutset string) (string, int) { + j := i + for ; j < len(runes); j++ { + if strings.IndexRune(cutset, runes[j]) >= 0 { + break + } + } + return string(runes[i:j]), j - i + } + + return func() (*treeNode, error) { + for i < len(runes) && runes[i] == ' ' { + i += 1 + } + + if i >= len(runes) { + return &treeNode{token: lfTokenEOF}, nil + } + + node := &treeNode{location: i} + switch 
runes[i] { + case '&': + if !peekIs('&') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") + } + i += 2 + node.token = lfTokenAnd + case '|': + if !peekIs('|') { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") + } + i += 2 + node.token = lfTokenOr + case '!': + i += 1 + node.token = lfTokenNot + case ',': + i += 1 + node.token = lfTokenOr + case '(': + i += 1 + node.token = lfTokenOpenGroup + case ')': + i += 1 + node.token = lfTokenCloseGroup + case '/': + i += 1 + value, n := consumeUntil("/") + i += n + 1 + node.token, node.value = lfTokenRegexp, value + default: + value, n := consumeUntil("&|!,()/") + i += n + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) + } + return node, nil + } +} + +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseLabelFilter(input string) (LabelFilter, error) { + if DEBUG_LABEL_FILTER_PARSING { + fmt.Println("\n==============") + fmt.Println("Input: ", input) + fmt.Print("Tokens: ") + } + if input == "" { + return func(_ []string) bool { return true }, nil + } + nextToken := tokenize(input) + + root := &treeNode{token: lfTokenRoot} + current := root +LOOP: + for { + node, err := nextToken() + if err != nil { + return nil, err + } + + if DEBUG_LABEL_FILTER_PARSING { + fmt.Print(node.tokenString() + " ") + } + + switch node.token { + case lfTokenEOF: + break LOOP + case lfTokenLabel, lfTokenRegexp: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.") + } + current.setRightNode(node) + case lfTokenNot, lfTokenOpenGroup: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) + } + current.setRightNode(node) + current = node + case lfTokenAnd, lfTokenOr: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) + } + nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) + node.setLeftNode(nodeToStealFrom.rightNode) + nodeToStealFrom.setRightNode(node) + current = node + case lfTokenCloseGroup: + firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() + if firstUnmatchedOpenNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") + } + if firstUnmatchedOpenNode == current && current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") + } + firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed + current = firstUnmatchedOpenNode.parent + default: + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) + } + } + if DEBUG_LABEL_FILTER_PARSING { + fmt.Printf("\n Tree:\n%s", root.toString(0)) + } + return root.constructLabelFilter(input) +} + +func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { + out := strings.TrimSpace(label) + if out == "" { + return "", GinkgoErrors.InvalidEmptyLabel(cl) + } + if strings.ContainsAny(out, "&|!,()/") { + return "", 
GinkgoErrors.InvalidLabel(label, cl) + } + return out, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go new file mode 100644 index 00000000..7b1524b5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -0,0 +1,190 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" +) + +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel +type ReportEntryValue struct { + raw interface{} //unexported to prevent gob from freaking out about unregistered structs + AsJSON string + Representation string +} + +func WrapEntryValue(value interface{}) ReportEntryValue { + return ReportEntryValue{ + raw: value, + } +} + +func (rev ReportEntryValue) GetRawValue() interface{} { + return rev.raw +} + +func (rev ReportEntryValue) String() string { + if rev.raw == nil { + return "" + } + if colorableStringer, ok := rev.raw.(ColorableStringer); ok { + return colorableStringer.ColorableString() + } + + if stringer, ok := rev.raw.(fmt.Stringer); ok { + return stringer.String() + } + if rev.Representation != "" { + return rev.Representation + } + return fmt.Sprintf("%+v", rev.raw) +} + +func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { + //All this to capture the representation at encoding-time, not creating time + //This way users can Report on pointers and get their final values at reporting-time + out := struct { + AsJSON string + Representation string + }{ + Representation: rev.String(), + } + asJSON, err := json.Marshal(rev.raw) + if err != nil { + return nil, err + } + out.AsJSON = string(asJSON) + + return json.Marshal(out) +} + +func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error { + in := struct { + AsJSON string + Representation string + }{} + err := json.Unmarshal(data, &in) + if err != nil { + return err + } + rev.AsJSON = in.AsJSON + rev.Representation = in.Representation + return json.Unmarshal([]byte(in.AsJSON), &(rev.raw)) +} + +func (rev ReportEntryValue) GobEncode() ([]byte, error) { + return rev.MarshalJSON() +} + +func (rev *ReportEntryValue) GobDecode(data []byte) error { + return rev.UnmarshalJSON(data) +} + +// ReportEntry captures information attached to `SpecReport` via `AddReportEntry` +type ReportEntry struct { + // Visibility captures the visibility policy for this ReportEntry + Visibility ReportEntryVisibility + // Location captures the location of the AddReportEntry call + Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + + // Name captures the name of this report + Name string + // Value captures the (optional) object passed into AddReportEntry - this can be + // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make + // encoding/decoding the value easier. To access the raw value call entry.GetRawValue() + Value ReportEntryValue +} + +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. 
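+// A hypothetical implementation (editor's illustrative sketch, not part of the vendored source): +// type failureSummary struct{ msg string } +// func (f failureSummary) ColorableString() string { return "{{red}}" + f.msg + "{{/}}" } +// A value like this attached via AddReportEntry would be rendered via ColorableString() rather than String().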
+type ColorableStringer interface { + ColorableString() string +} + +// StringRepresentation() returns the string representation of the value associated with the ReportEntry -- +// if value is nil, empty string is returned +// if value is a `ColorableStringer` then `Value.ColorableString()` is returned +// if value is a `fmt.Stringer` then `Value.String()` is returned +// otherwise the value is formatted with "%+v" +func (entry ReportEntry) StringRepresentation() string { + return entry.Value.String() +} + +// GetRawValue returns the Value object that was passed to AddReportEntry +// If called in-process this will be the same object that was passed into AddReportEntry. +// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be +// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON +// field yourself. +func (entry ReportEntry) GetRawValue() interface{} { + return entry.Value.GetRawValue() +} + +func (entry ReportEntry) GetTimelineLocation() TimelineLocation { + return entry.TimelineLocation +} + +type ReportEntries []ReportEntry + +func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + return true + } + } + return false +} + +func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries { + out := ReportEntries{} + + for _, entry := range re { + if entry.Visibility.Is(visibilities...) { + out = append(out, entry) + } + } + + return out +} + +// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter +type ReportEntryVisibility uint + +const ( + // Always print out this ReportEntry + ReportEntryVisibilityAlways ReportEntryVisibility = iota + // Only print out this ReportEntry if the spec fails or if the test is run with -v + ReportEntryVisibilityFailureOrVerbose + // Never print out this ReportEntry (note that ReportEntries are always encoded in machine readable reports (e.g.
JSON, JUnit, etc.)) + ReportEntryVisibilityNever +) + +var revEnumSupport = NewEnumSupport(map[uint]string{ + uint(ReportEntryVisibilityAlways): "always", + uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose", + uint(ReportEntryVisibilityNever): "never", +}) + +func (rev ReportEntryVisibility) String() string { + return revEnumSupport.String(uint(rev)) +} +func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error { + out, err := revEnumSupport.UnmarshJSON(b) + *rev = ReportEntryVisibility(out) + return err +} +func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) { + return revEnumSupport.MarshJSON(uint(rev)) +} + +func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool { + for _, visibility := range visibilities { + if v == visibility { + return true + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go new file mode 100644 index 00000000..aae69b04 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -0,0 +1,914 @@ +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +// Report captures information about a Ginkgo test run +type Report struct { + //SuitePath captures the absolute path to the test suite + SuitePath string + + //SuiteDescription captures the description string passed to the DSL's RunSpecs() function + SuiteDescription string + + //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function + SuiteLabels []string + + //SuiteSucceeded captures the success or failure status of the test run + //If true, the test run is considered successful. + //If false, the test run is considered unsuccessful + SuiteSucceeded bool + + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused +//(i.e. an `FIt` or an `FDescribe`) + SuiteHasProgrammaticFocus bool + + //SpecialSuiteFailureReasons may contain special failure reasons + //For example, a test suite might be considered "failed" even if none of the individual specs + //have a failure state. For instance, if the user has configured --fail-on-pending the test suite + //will have failed if there are pending tests even though all non-pending tests may have passed. In such + //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure. + //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user. + //Since multiple special failure reasons can occur, this field is a slice. + SpecialSuiteFailureReasons []string + + //PreRunStats contains a set of stats captured before the test run begins. This is primarily used + //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) + //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
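+ //For example (editor's illustration): a 100-spec suite run with a focus filter that selects 12 specs + //would report TotalSpecs = 100 and SpecsThatWillRun = 12.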
+ PreRunStats PreRunStats + + //StartTime and EndTime capture the start and end time of the test run + StartTime time.Time + EndTime time.Time + + //RunTime captures the duration of the test run + RunTime time.Duration + + //SuiteConfig captures the Ginkgo configuration governing this test run + //SuiteConfig includes information necessary for reproducing an identical test run, + //such as the random seed and any filters applied during the test run + SuiteConfig SuiteConfig + + //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite + SpecReports SpecReports +} + +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +type PreRunStats struct { + TotalSpecs int + SpecsThatWillRun int +} + +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes +// to form a complete final report. +func (report Report) Add(other Report) Report { + report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded + + if other.StartTime.Before(report.StartTime) { + report.StartTime = other.StartTime + } + + if other.EndTime.After(report.EndTime) { + report.EndTime = other.EndTime + } + + specialSuiteFailureReasons := []string{} + reasonsLookup := map[string]bool{} + for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} { + for _, reason := range reasons { + if !reasonsLookup[reason] { + reasonsLookup[reason] = true + specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason) + } + } + } + report.SpecialSuiteFailureReasons = specialSuiteFailureReasons + report.RunTime = report.EndTime.Sub(report.StartTime) + + reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) + copy(reports, report.SpecReports) + offset := len(report.SpecReports) + for i := range other.SpecReports { + reports[i+offset] = other.SpecReports[i] + } + + report.SpecReports = reports + return report +} + +// SpecReport captures information about a Ginkgo spec. +type SpecReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, and LeafNodeText capture the NodeType, CodeLocation, labels, and text + // of the Ginkgo node being tested (typically a NodeTypeIt node, though this can also be + // one of the NodeTypesForSuiteLevelNodes node types) + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + + // State captures whether the spec has passed, failed, etc.
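+ // Use State.Is(SpecStateFailureStates) to check for any failure state (editor's note; see SpecState below).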
+ State SpecState + + // IsSerial captures whether the spec has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether the spec appears in an Ordered container + IsInOrderedContainer bool + + // StartTime and EndTime capture the start and end time of the spec + StartTime time.Time + EndTime time.Time + + // RunTime captures the duration of the spec + RunTime time.Duration + + // ParallelProcess captures the parallel process that this spec ran on + ParallelProcess int + + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) + //It includes detailed information about the Failure + Failure Failure + + // NumAttempts captures the number of times this Spec was run. + // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + // Repeated specs can be retried with the use of the MustPassRepeatedly decorator + NumAttempts int + + // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + MaxFlakeAttempts int + + // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator + MaxMustPassRepeatedly int + + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter + CapturedGinkgoWriterOutput string + + // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel) + // This is always empty when running in series or calling CurrentSpecReport() + // It is used internally by Ginkgo's reporter + CapturedStdOutErr string + + // ReportEntries contains any reports added via `AddReportEntry` + ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport + + // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. 
+ AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents +} + +func (report SpecReport) MarshalJSON() ([]byte, error) { + //All this to avoid emitting an empty Failure struct in the JSON + out := struct { + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` + }{ + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, + } + + if !report.Failure.IsZero() { + out.Failure = &(report.Failure) + } + if len(report.ReportEntries) > 0 { + out.ReportEntries = report.ReportEntries + } + if len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + if len(report.AdditionalFailures) > 0 { + out.AdditionalFailures = report.AdditionalFailures + } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } + + return json.Marshal(out) +} + +// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput +// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty. +// CombinedOutput() is used internally by Ginkgo's reporter. +func (report SpecReport) CombinedOutput() string { + if report.CapturedStdOutErr == "" { + return report.CapturedGinkgoWriterOutput + } + if report.CapturedGinkgoWriterOutput == "" { + return report.CapturedStdOutErr + } + return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput +} + +// Failed returns true if report.State is one of the SpecStateFailureStates +// (SpecStateFailed, SpecStatePanicked, SpecStateInterrupted, SpecStateAborted, SpecStateTimedout) +func (report SpecReport) Failed() bool { + return report.State.Is(SpecStateFailureStates) +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report SpecReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + if report.LeafNodeText != "" { + texts = append(texts, report.LeafNodeText) + } + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels.
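+// For example (editor's illustration): containers labeled ["integration"] and ["slow"] plus a leaf labeled +// ["slow", "db"] yield ["integration", "slow", "db"] -- container labels come first, in order of first appearance.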
+func (report SpecReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + for _, label := range report.LeafNodeLabels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + + return out +} + +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { + filter, err := ParseLabelFilter(query) + if err != nil { + return false, err + } + return filter(report.Labels()), nil +} + +// FileName() returns the name of the file containing the spec +func (report SpecReport) FileName() string { + return report.LeafNodeLocation.FileName +} + +// LineNumber() returns the line number of the leaf node +func (report SpecReport) LineNumber() int { + return report.LeafNodeLocation.LineNumber +} + +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) +func (report SpecReport) FailureMessage() string { + return report.Failure.Message +} + +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +func (report SpecReport) FailureLocation() CodeLocation { + return report.Failure.Location +} + +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + +type SpecReports []SpecReport + +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { + count := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + count++ + } + } + + out := make(SpecReports, count) + j := 0 + for i := range reports { + if reports[i].LeafNodeType.Is(nodeTypes) { + out[j] = reports[i] + j++ + } + } + return out +} + +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) WithState(states SpecState) SpecReports { + count := 0 + for i := range reports { + if reports[i].State.Is(states) { + count++ + } + } + + out, j := make(SpecReports, count), 0 + for i := range reports { + if reports[i].State.Is(states) { + out[j] = reports[i] + j++ + } + } + return out +} + +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +func (reports SpecReports) CountWithState(states SpecState) int { + n := 0 + for i := range reports { + if reports[i].State.Is(states) { + n += 1 + } + } + return n +} + +// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts. 
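+// (Editor's clarification: this counts specs that ultimately passed, but only after more than one attempt +// under ginkgo --flake-attempts=N or the FlakeAttempts decorator.)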
+func (reports SpecReports) CountOfFlakedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// CountOfRepeatedSpecs returns the number of SpecReports for specs run with the MustPassRepeatedly decorator that ended in failure after multiple attempts +func (reports SpecReports) CountOfRepeatedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// TimelineLocation captures the location of an event in the spec's timeline +type TimelineLocation struct { + //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream + Offset int `json:",omitempty"` + + //Order is the order of the event with respect to other events. The absolute value of Order + //is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order + Order int `json:",omitempty"` + + Time time.Time +} + +// TimelineEvent represents an event on the timeline +// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it +type TimelineEvent interface { + GetTimelineLocation() TimelineLocation +} + +type Timeline []TimelineEvent + +func (t Timeline) Len() int { return len(t) } +func (t Timeline) Less(i, j int) bool { + return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order +} +func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t Timeline) WithoutHiddenReportEntries() Timeline { + out := Timeline{} + for _, event := range t { + if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever { + continue + } + out = append(out, event) + } + return out +} + +func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline { + out := Timeline{} + for _, event := range t { + if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() { + continue + } + out = append(out, event) + } + return out +} + +// Failure captures failure information for an individual test +type Failure struct { + // Message - the failure message passed into Fail(...). When using a matcher library + // like Gomega, this will contain the failure message generated by Gomega. + // + // Message is also populated if the user has called Skip(...). + Message string + + // Location - the CodeLocation where the failure occurred + // This CodeLocation will include a fully-populated StackTrace + Location CodeLocation + + TimelineLocation TimelineLocation + + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) + // then ForwardedPanic will be populated with a string representation of the captured panic. + ForwardedPanic string `json:",omitempty"` + + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container.
FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. + // + // FailureNodeType will contain the NodeType of the node in which the failure occurred. + // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. + // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` +} + +func (f Failure) IsZero() bool { + return f.Message == "" && (f.Location == CodeLocation{}) +} + +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation +} + +// FailureNodeContext captures the location context for the node containing the failing line of code +type FailureNodeContext uint + +const ( + FailureNodeContextInvalid FailureNodeContext = iota + + FailureNodeIsLeafNode + FailureNodeAtTopLevel + FailureNodeInContainer +) + +var fncEnumSupport = NewEnumSupport(map[uint]string{ + uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT", + uint(FailureNodeIsLeafNode): "leaf-node", + uint(FailureNodeAtTopLevel): "top-level", + uint(FailureNodeInContainer): "in-container", +}) + +func (fnc FailureNodeContext) String() string { + return fncEnumSupport.String(uint(fnc)) +} +func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error { + out, err := fncEnumSupport.UnmarshJSON(b) + *fnc = FailureNodeContext(out) + return err +} +func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { + return fncEnumSupport.MarshJSON(uint(fnc)) +} + +// AdditionalFailure captures any additional failures that occur after the initial failure of a spec; +// these typically occur in cleanup nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + +// SpecState captures the state of a spec +// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` +type SpecState uint + +const ( + SpecStateInvalid SpecState = 0 + + SpecStatePending SpecState = 1 << iota + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStateAborted + SpecStatePanicked + SpecStateInterrupted + SpecStateTimedout +) + +var ssEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecStateInvalid): "INVALID SPEC STATE", + uint(SpecStatePending): "pending", + uint(SpecStateSkipped): "skipped", + uint(SpecStatePassed): "passed", + uint(SpecStateFailed): "failed", + uint(SpecStateAborted): "aborted", + uint(SpecStatePanicked): "panicked", + uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", +}) + +func (ss SpecState) String() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} +func (ss *SpecState) UnmarshalJSON(b []byte) error { + out, err := ssEnumSupport.UnmarshJSON(b) + *ss = SpecState(out) + return err +} +func (ss SpecState) MarshalJSON() ([]byte, error) { + return ssEnumSupport.MarshJSON(uint(ss)) +} + +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted + +func (ss SpecState) Is(states SpecState) bool { + return ss&states != 0 +} + +// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` + + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` + + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` + + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` + + AdditionalReports []string `json:",omitempty"` + + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` + + Goroutines []Goroutine `json:",omitempty"` +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || 
goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + out := pr + out.CapturedGinkgoWriterOutput = "" + return out +} + +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + +// NodeType captures the type of a given Ginkgo Node +type NodeType uint + +const ( + NodeTypeInvalid NodeType = 0 + + NodeTypeContainer NodeType = 1 << iota + NodeTypeIt + + NodeTypeBeforeEach + NodeTypeJustBeforeEach + NodeTypeAfterEach + NodeTypeJustAfterEach + + NodeTypeBeforeAll + NodeTypeAfterAll + + NodeTypeBeforeSuite + NodeTypeSynchronizedBeforeSuite + NodeTypeAfterSuite + NodeTypeSynchronizedAfterSuite + + NodeTypeReportBeforeEach + NodeTypeReportAfterEach + NodeTypeReportBeforeSuite + NodeTypeReportAfterSuite + + NodeTypeCleanupInvalid + NodeTypeCleanupAfterEach + NodeTypeCleanupAfterAll + NodeTypeCleanupAfterSuite +) + +var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite + +var ntEnumSupport = NewEnumSupport(map[uint]string{ + uint(NodeTypeInvalid): "INVALID NODE TYPE", + uint(NodeTypeContainer): "Container", + uint(NodeTypeIt): "It", + uint(NodeTypeBeforeEach): "BeforeEach", + uint(NodeTypeJustBeforeEach): "JustBeforeEach", + uint(NodeTypeAfterEach): "AfterEach", + uint(NodeTypeJustAfterEach): "JustAfterEach", + uint(NodeTypeBeforeAll): "BeforeAll", + uint(NodeTypeAfterAll): "AfterAll", + uint(NodeTypeBeforeSuite): "BeforeSuite", + uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite", + uint(NodeTypeAfterSuite): "AfterSuite", + uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", + uint(NodeTypeReportBeforeEach): "ReportBeforeEach", + uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite", + uint(NodeTypeReportAfterSuite): "ReportAfterSuite", + uint(NodeTypeCleanupInvalid): "DeferCleanup", + uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", + uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", + 
uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", +}) + +func (nt NodeType) String() string { + return ntEnumSupport.String(uint(nt)) +} +func (nt *NodeType) UnmarshalJSON(b []byte) error { + out, err := ntEnumSupport.UnmarshJSON(b) + *nt = NodeType(out) + return err +} +func (nt NodeType) MarshalJSON() ([]byte, error) { + return ntEnumSupport.MarshJSON(uint(nt)) +} + +func (nt NodeType) Is(nodeTypes NodeType) bool { + return nt&nodeTypes != 0 +} + +/* +SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events. +*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", +}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go new file mode 100644 index 00000000..a4a1524b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -0,0 +1,3 @@ +package types + +const VERSION = "2.13.2" diff --git a/vendor/github.com/quic-go/quic-go/.gitignore b/vendor/github.com/quic-go/quic-go/.gitignore new file mode 100644 index 00000000..b454729d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/.gitignore @@ -0,0 +1,18 @@
+debug +debug.test +main +mockgen_tmp.go +*.qtr +*.qlog +*.sqlog +*.txt +race.[0-9]* + +fuzzing/*/*.zip +fuzzing/*/coverprofile +fuzzing/*/crashers +fuzzing/*/sonarprofile +fuzzing/*/suppressions +fuzzing/*/corpus/ + +gomock_reflect_*/ diff --git a/vendor/github.com/quic-go/quic-go/.golangci.yml b/vendor/github.com/quic-go/quic-go/.golangci.yml new file mode 100644 index 00000000..7c4b71c0 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/.golangci.yml @@ -0,0 +1,45 @@ +linters-settings: + misspell: + ignore-words: + - ect + depguard: + rules: + quicvarint: + list-mode: strict + files: + - "**/github.com/quic-go/quic-go/quicvarint/*" + - "!$test" + allow: + - $gostd + +linters: + disable-all: true + enable: + - asciicheck + - depguard + - exhaustive + - exportloopref + - goimports + - gofmt # redundant, since gofmt *should* be a no-op after gofumpt + - gofumpt + - gosimple + - govet + - ineffassign + - misspell + - prealloc + - staticcheck + - stylecheck + - unconvert + - unparam + - unused + +issues: + exclude-files: + - internal/handshake/cipher_suite.go + exclude-rules: + - path: internal/qtls + linters: + - depguard + - path: _test\.go + linters: + - exhaustive diff --git a/vendor/github.com/quic-go/quic-go/Changelog.md b/vendor/github.com/quic-go/quic-go/Changelog.md new file mode 100644 index 00000000..82df5fb2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/Changelog.md @@ -0,0 +1,109 @@ +# Changelog + +## v0.22.0 (2021-07-25) + +- Use `ReadBatch` to read multiple UDP packets from the socket with a single syscall +- Add a config option (`Config.DisableVersionNegotiationPackets`) to disable sending of Version Negotiation packets +- Drop support for QUIC draft versions 32 and 34 +- Remove the `RetireBugBackwardsCompatibilityMode`, which was intended to mitigate a bug when retiring connection IDs in quic-go in v0.17.2 and earlier + +## v0.21.2 (2021-07-15) + +- Update qtls (for Go 1.15, 1.16 and 1.17rc1) to include the fix for the crypto/tls panic (see https://groups.google.com/g/golang-dev/c/5LJ2V7rd-Ag/m/YGLHVBZ6AAAJ for details) + +## v0.21.0 (2021-06-01) + +- quic-go now supports RFC 9000! + +## v0.20.0 (2021-03-19) + +- Remove the `quic.Config.HandshakeTimeout`. Introduce a `quic.Config.HandshakeIdleTimeout`. + +## v0.17.1 (2020-06-20) + +- Supports QUIC WG draft-29. +- Improve bundling of ACK frames (#2543). + +## v0.16.0 (2020-05-31) + +- Supports QUIC WG draft-28. + +## v0.15.0 (2020-03-01) + +- Supports QUIC WG draft-27. +- Add support for 0-RTT. +- Remove `Session.Close()`. Applications need to pass an application error code to the transport using `Session.CloseWithError()`. +- Make the TLS Cipher Suites configurable (via `tls.Config.CipherSuites`). + +## v0.14.0 (2019-12-04) + +- Supports QUIC WG draft-24. + +## v0.13.0 (2019-11-05) + +- Supports QUIC WG draft-23. +- Add an `EarlyListener` that allows sending of 0.5-RTT data. +- Add a `TokenStore` to store address validation tokens. +- Issue and use new connection IDs during a connection. + +## v0.12.0 (2019-08-05) + +- Implement HTTP/3. +- Rename `quic.Cookie` to `quic.Token` and `quic.Config.AcceptCookie` to `quic.Config.AcceptToken`. +- Distinguish between Retry tokens and tokens sent in NEW_TOKEN frames. +- Enforce application protocol negotiation (via `tls.Config.NextProtos`). +- Use a varint for error codes. +- Add support for [quic-trace](https://github.com/google/quic-trace). +- Add a context to `Listener.Accept`, `Session.Accept{Uni}Stream` and `Session.Open{Uni}StreamSync`. +- Implement TLS key updates.
+ +## v0.11.0 (2019-04-05) + +- Drop support for gQUIC. For gQUIC support, please switch to the *gquic* branch. +- Implement QUIC WG draft-19. +- Use [qtls](https://github.com/marten-seemann/qtls) for TLS 1.3. +- Return a `tls.ConnectionState` from `quic.Session.ConnectionState()`. +- Remove the error return values from `quic.Stream.CancelRead()` and `quic.Stream.CancelWrite()` + +## v0.10.0 (2018-08-28) + +- Add support for QUIC 44, drop support for QUIC 42. + +## v0.9.0 (2018-08-15) + +- Add a `quic.Config` option for the length of the connection ID (for IETF QUIC). +- Split Session.Close into one method for regular closing and one for closing with an error. + +## v0.8.0 (2018-06-26) + +- Add support for unidirectional streams (for IETF QUIC). +- Add a `quic.Config` option for the maximum number of incoming streams. +- Add support for QUIC 42 and 43. +- Add dial functions that use a context. +- Multiplex clients on a net.PacketConn, when using Dial(conn). + +## v0.7.0 (2018-02-03) + +- The lower boundary for packets included in ACKs is now derived, and the value sent in STOP_WAITING frames is ignored. +- Remove `DialNonFWSecure` and `DialAddrNonFWSecure`. +- Expose the `ConnectionState` in the `Session` (experimental API). +- Implement packet pacing. + +## v0.6.0 (2017-12-12) + +- Add support for QUIC 39, drop support for QUIC 35 - 37 +- Added `quic.Config` options for maximal flow control windows +- Add a `quic.Config` option for QUIC versions +- Add a `quic.Config` option to request omission of the connection ID from a server +- Add a `quic.Config` option to configure the source address validation +- Add a `quic.Config` option to configure the handshake timeout +- Add a `quic.Config` option to configure the idle timeout +- Add a `quic.Config` option to configure keep-alive +- Rename the STK to Cookie +- Implement `net.Conn`-style deadlines for streams +- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details. +- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details. +- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper` +- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn` +- Drop support for Go 1.7 and 1.8. +- Various bugfixes diff --git a/vendor/github.com/quic-go/quic-go/LICENSE b/vendor/github.com/quic-go/quic-go/LICENSE new file mode 100644 index 00000000..51378bef --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 the quic-go authors & Google, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/quic-go/quic-go/README.md b/vendor/github.com/quic-go/quic-go/README.md new file mode 100644 index 00000000..94823d99 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/README.md @@ -0,0 +1,50 @@ +# A QUIC implementation in pure Go + + + +[![Documentation](https://img.shields.io/badge/docs-quic--go.net-red?style=flat)](https://quic-go.net/docs/) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/quic-go)](https://pkg.go.dev/github.com/quic-go/quic-go) +[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/quic-go/) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/quic-go.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:quic-go) + +quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)). + +In addition to these base RFCs, it also implements the following RFCs: +* Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) +* Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)) +* QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369)) +* QUIC Event Logging using qlog ([draft-ietf-quic-qlog-main-schema](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/) and [draft-ietf-quic-qlog-quic-events](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-quic-events/)) + +Support for WebTransport over HTTP/3 ([draft-ietf-webtrans-http3](https://datatracker.ietf.org/doc/draft-ietf-webtrans-http3/)) is implemented in [webtransport-go](https://github.com/quic-go/webtransport-go). + +Detailed documentation can be found on [quic-go.net](https://quic-go.net/docs/). + +## Projects using quic-go + +| Project | Description | Stars | +| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | +| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server. 
| ![GitHub Repo stars](https://img.shields.io/github/stars/AdguardTeam/AdGuardHome?style=flat-square) | +| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) | +| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) | +| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) | +| [frp](https://github.com/fatedier/frp) | A fast reverse proxy to help you expose a local server behind a NAT or firewall to the internet | ![GitHub Repo stars](https://img.shields.io/github/stars/fatedier/frp?style=flat-square) | +| [go-libp2p](https://github.com/libp2p/go-libp2p) | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others | ![GitHub Repo stars](https://img.shields.io/github/stars/libp2p/go-libp2p?style=flat-square) | +| [gost](https://github.com/go-gost/gost) | A simple security tunnel written in Go | ![GitHub Repo stars](https://img.shields.io/github/stars/go-gost/gost?style=flat-square) | +| [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/apernet/hysteria?style=flat-square) | +| [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications | ![GitHub Repo stars](https://img.shields.io/github/stars/dunglas/mercure?style=flat-square) | +| [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | +| [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins | ![GitHub Repo stars](https://img.shields.io/github/stars/roadrunner-server/roadrunner?style=flat-square) | +| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | +| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | +| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) | +| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) | + +If you'd like to see your project added to this list, please send us a PR. + +## Release Policy + +quic-go always aims to support the latest two Go releases. + +## Contributing + +We are always happy to welcome new contributors! 
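As a quick orientation to the client API that this bump vendors (documented fully in the `client.go` sources further below), a minimal client can be sketched as follows. This is only a sketch: the `localhost:4242` address and the `quic-echo` ALPN string are illustrative placeholders, and certificate verification is disabled purely to keep the example short. Note that the `tls.Config` must define an application protocol via `NextProtos`, as the `Dial` documentation below states.

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"

	"github.com/quic-go/quic-go"
)

func main() {
	tlsConf := &tls.Config{
		InsecureSkipVerify: true,                  // illustration only; verify certificates in real code
		NextProtos:         []string{"quic-echo"}, // placeholder ALPN value
	}
	// DialAddr resolves the address and creates a UDP socket for this connection.
	conn, err := quic.DialAddr(context.Background(), "localhost:4242", tlsConf, nil)
	if err != nil {
		panic(err)
	}
	defer conn.CloseWithError(0, "done")

	// Open a bidirectional stream and write some data.
	stream, err := conn.OpenStreamSync(context.Background())
	if err != nil {
		panic(err)
	}
	defer stream.Close()
	if _, err := stream.Write([]byte("hello")); err != nil {
		panic(err)
	}
	fmt.Println("sent hello over QUIC")
}
```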
We have a number of self-contained issues that are suitable for first-time contributors; they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. diff --git a/vendor/github.com/quic-go/quic-go/SECURITY.md b/vendor/github.com/quic-go/quic-go/SECURITY.md new file mode 100644 index 00000000..c24c08f8 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +quic-go is still in development. This means that there may be problems in our protocols, +or there may be mistakes in our implementations. +We take security vulnerabilities very seriously. If you discover a security issue, +please bring it to our attention right away! + +## Reporting a Vulnerability + +If you find a vulnerability that may affect live deployments -- for example, by exposing +a remote execution exploit -- please [**report privately**](https://github.com/quic-go/quic-go/security/advisories/new). +Please **DO NOT file a public issue**. + +If the issue is an implementation weakness that cannot be immediately exploited or +something not yet deployed, just discuss it openly. + +## Reporting a non-security bug + +For non-security bugs, please simply file a GitHub [issue](https://github.com/quic-go/quic-go/issues/new). diff --git a/vendor/github.com/quic-go/quic-go/buffer_pool.go b/vendor/github.com/quic-go/quic-go/buffer_pool.go new file mode 100644 index 00000000..48589e12 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/buffer_pool.go @@ -0,0 +1,92 @@ +package quic + +import ( + "sync" + + "github.com/quic-go/quic-go/internal/protocol" +) + +type packetBuffer struct { + Data []byte + + // refCount counts how many packets Data is used in. + // It doesn't support concurrent use. + // It is > 1 when used for coalesced packets. + refCount int +} + +// Split increases the refCount. +// It must be called when a packet buffer is used for more than one packet, +// e.g. when splitting coalesced packets. +func (b *packetBuffer) Split() { + b.refCount++ +} + +// Decrement decrements the reference counter. +// It doesn't put the buffer back into the pool. +func (b *packetBuffer) Decrement() { + b.refCount-- + if b.refCount < 0 { + panic("negative packetBuffer refCount") + } +} + +// MaybeRelease puts the packet buffer back into the pool, +// if the reference counter already reached 0. +func (b *packetBuffer) MaybeRelease() { + // only put the packetBuffer back if it's not used any more + if b.refCount == 0 { + b.putBack() + } +} + +// Release puts the packet buffer back into the pool. +// It should be called when processing is definitely finished.
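+// Unlike MaybeRelease, Release asserts that this was the last remaining +// reference: it panics if the reference count has not dropped to zero.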
+func (b *packetBuffer) Release() { + b.Decrement() + if b.refCount != 0 { + panic("packetBuffer refCount not zero") + } + b.putBack() +} + +// Len returns the length of Data +func (b *packetBuffer) Len() protocol.ByteCount { return protocol.ByteCount(len(b.Data)) } +func (b *packetBuffer) Cap() protocol.ByteCount { return protocol.ByteCount(cap(b.Data)) } + +func (b *packetBuffer) putBack() { + if cap(b.Data) == protocol.MaxPacketBufferSize { + bufferPool.Put(b) + return + } + if cap(b.Data) == protocol.MaxLargePacketBufferSize { + largeBufferPool.Put(b) + return + } + panic("putPacketBuffer called with packet of wrong size!") +} + +var bufferPool, largeBufferPool sync.Pool + +func getPacketBuffer() *packetBuffer { + buf := bufferPool.Get().(*packetBuffer) + buf.refCount = 1 + buf.Data = buf.Data[:0] + return buf +} + +func getLargePacketBuffer() *packetBuffer { + buf := largeBufferPool.Get().(*packetBuffer) + buf.refCount = 1 + buf.Data = buf.Data[:0] + return buf +} + +func init() { + bufferPool.New = func() any { + return &packetBuffer{Data: make([]byte, 0, protocol.MaxPacketBufferSize)} + } + largeBufferPool.New = func() any { + return &packetBuffer{Data: make([]byte, 0, protocol.MaxLargePacketBufferSize)} + } +} diff --git a/vendor/github.com/quic-go/quic-go/client.go b/vendor/github.com/quic-go/quic-go/client.go new file mode 100644 index 00000000..1c5654f6 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/client.go @@ -0,0 +1,251 @@ +package quic + +import ( + "context" + "crypto/tls" + "errors" + "net" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" +) + +type client struct { + sendConn sendConn + + use0RTT bool + + packetHandlers packetHandlerManager + onClose func() + + tlsConf *tls.Config + config *Config + + connIDGenerator ConnectionIDGenerator + srcConnID protocol.ConnectionID + destConnID protocol.ConnectionID + + initialPacketNumber protocol.PacketNumber + hasNegotiatedVersion bool + version protocol.Version + + handshakeChan chan struct{} + + conn quicConn + + tracer *logging.ConnectionTracer + tracingID ConnectionTracingID + logger utils.Logger +} + +// make it possible to mock connection ID for initial generation in the tests +var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial + +// DialAddr establishes a new QUIC connection to a server. +// It resolves the address, and then creates a new UDP connection to dial the QUIC server. +// When the QUIC connection is closed, this UDP connection is closed. +// See Dial for more details. +func DialAddr(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (Connection, error) { + udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + return nil, err + } + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + tr, err := setupTransport(udpConn, tlsConf, true) + if err != nil { + return nil, err + } + return tr.dial(ctx, udpAddr, addr, tlsConf, conf, false) +} + +// DialAddrEarly establishes a new 0-RTT QUIC connection to a server. +// See DialAddr for more details. 
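+// The returned EarlyConnection can be used to send application data in 0-RTT, +// before the handshake has completed.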
+func DialAddrEarly(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) { + udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + return nil, err + } + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + tr, err := setupTransport(udpConn, tlsConf, true) + if err != nil { + return nil, err + } + conn, err := tr.dial(ctx, udpAddr, addr, tlsConf, conf, true) + if err != nil { + tr.Close() + return nil, err + } + return conn, nil +} + +// DialEarly establishes a new 0-RTT QUIC connection to a server using a net.PacketConn. +// See Dial for more details. +func DialEarly(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) { + dl, err := setupTransport(c, tlsConf, false) + if err != nil { + return nil, err + } + conn, err := dl.DialEarly(ctx, addr, tlsConf, conf) + if err != nil { + dl.Close() + return nil, err + } + return conn, nil +} + +// Dial establishes a new QUIC connection to a server using a net.PacketConn. +// If the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn does), +// ECN and packet info support will be enabled. In this case, ReadMsgUDP and WriteMsgUDP +// will be used instead of ReadFrom and WriteTo to read/write packets. +// The tls.Config must define an application protocol (using NextProtos). +// +// This is a convenience function. More advanced use cases should instantiate a Transport, +// which offers configuration options for a more fine-grained control of the connection establishment, +// including reusing the underlying UDP socket for multiple QUIC connections. +func Dial(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (Connection, error) { + dl, err := setupTransport(c, tlsConf, false) + if err != nil { + return nil, err + } + conn, err := dl.Dial(ctx, addr, tlsConf, conf) + if err != nil { + dl.Close() + return nil, err + } + return conn, nil +} + +func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn bool) (*Transport, error) { + if tlsConf == nil { + return nil, errors.New("quic: tls.Config not set") + } + return &Transport{ + Conn: c, + createdConn: createdPacketConn, + isSingleUse: true, + }, nil +} + +func dial( + ctx context.Context, + conn sendConn, + connIDGenerator ConnectionIDGenerator, + packetHandlers packetHandlerManager, + tlsConf *tls.Config, + config *Config, + onClose func(), + use0RTT bool, +) (quicConn, error) { + c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT) + if err != nil { + return nil, err + } + c.packetHandlers = packetHandlers + + c.tracingID = nextConnTracingID() + if c.config.Tracer != nil { + c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID) + } + if c.tracer != nil && c.tracer.StartedConnection != nil { + c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID) + } + if err := c.dial(ctx); err != nil { + return nil, err + } + return c.conn, nil +} + +func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) { + srcConnID, err := connIDGenerator.GenerateConnectionID() + if err != nil { + return nil, err + } + destConnID, err := generateConnectionIDForInitial() + if err != nil { + return nil, err + } + c := &client{ + 
connIDGenerator: connIDGenerator, + srcConnID: srcConnID, + destConnID: destConnID, + sendConn: sendConn, + use0RTT: use0RTT, + onClose: onClose, + tlsConf: tlsConf, + config: config, + version: config.Versions[0], + handshakeChan: make(chan struct{}), + logger: utils.DefaultLogger.WithPrefix("client"), + } + return c, nil +} + +func (c *client) dial(ctx context.Context) error { + c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version) + + c.conn = newClientConnection( + context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID), + c.sendConn, + c.packetHandlers, + c.destConnID, + c.srcConnID, + c.connIDGenerator, + c.config, + c.tlsConf, + c.initialPacketNumber, + c.use0RTT, + c.hasNegotiatedVersion, + c.tracer, + c.logger, + c.version, + ) + c.packetHandlers.Add(c.srcConnID, c.conn) + + errorChan := make(chan error, 1) + recreateChan := make(chan errCloseForRecreating) + go func() { + err := c.conn.run() + var recreateErr *errCloseForRecreating + if errors.As(err, &recreateErr) { + recreateChan <- *recreateErr + return + } + if c.onClose != nil { + c.onClose() + } + errorChan <- err // returns as soon as the connection is closed + }() + + // only set when we're using 0-RTT + // Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever. + var earlyConnChan <-chan struct{} + if c.use0RTT { + earlyConnChan = c.conn.earlyConnReady() + } + + select { + case <-ctx.Done(): + c.conn.destroy(nil) + return context.Cause(ctx) + case err := <-errorChan: + return err + case recreateErr := <-recreateChan: + c.initialPacketNumber = recreateErr.nextPacketNumber + c.version = recreateErr.nextVersion + c.hasNegotiatedVersion = true + return c.dial(ctx) + case <-earlyConnChan: + // ready to send 0-RTT data + return nil + case <-c.conn.HandshakeComplete(): + // handshake successfully completed + return nil + } +} diff --git a/vendor/github.com/quic-go/quic-go/closed_conn.go b/vendor/github.com/quic-go/quic-go/closed_conn.go new file mode 100644 index 00000000..83338532 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/closed_conn.go @@ -0,0 +1,57 @@ +package quic + +import ( + "math/bits" + "net" + + "github.com/quic-go/quic-go/internal/utils" +) + +// A closedLocalConn is a connection that we closed locally. +// When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame, +// with an exponential backoff. +type closedLocalConn struct { + counter uint32 + logger utils.Logger + + sendPacket func(net.Addr, packetInfo) +} + +var _ packetHandler = &closedLocalConn{} + +// newClosedLocalConn creates a new closedLocalConn and runs it. +func newClosedLocalConn(sendPacket func(net.Addr, packetInfo), logger utils.Logger) packetHandler { + return &closedLocalConn{ + sendPacket: sendPacket, + logger: logger, + } +} + +func (c *closedLocalConn) handlePacket(p receivedPacket) { + c.counter++ + // exponential backoff + // only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving + if bits.OnesCount32(c.counter) != 1 { + return + } + c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. 
Retransmitting.", c.counter) + c.sendPacket(p.remoteAddr, p.info) +} + +func (c *closedLocalConn) destroy(error) {} +func (c *closedLocalConn) closeWithTransportError(TransportErrorCode) {} + +// A closedRemoteConn is a connection that was closed remotely. +// For such a connection, we might receive reordered packets that were sent before the CONNECTION_CLOSE. +// We can just ignore those packets. +type closedRemoteConn struct{} + +var _ packetHandler = &closedRemoteConn{} + +func newClosedRemoteConn() packetHandler { + return &closedRemoteConn{} +} + +func (c *closedRemoteConn) handlePacket(receivedPacket) {} +func (c *closedRemoteConn) destroy(error) {} +func (c *closedRemoteConn) closeWithTransportError(TransportErrorCode) {} diff --git a/vendor/github.com/quic-go/quic-go/codecov.yml b/vendor/github.com/quic-go/quic-go/codecov.yml new file mode 100644 index 00000000..59e4b58f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/codecov.yml @@ -0,0 +1,16 @@ +coverage: + round: nearest + ignore: + - http3/gzip_reader.go + - interop/ + - internal/handshake/cipher_suite.go + - internal/utils/linkedlist/linkedlist.go + - internal/testdata + - testutils/ + - fuzzing/ + - metrics/ + status: + project: + default: + threshold: 0.5 + patch: false diff --git a/vendor/github.com/quic-go/quic-go/config.go b/vendor/github.com/quic-go/quic-go/config.go new file mode 100644 index 00000000..d42bdc1c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/config.go @@ -0,0 +1,128 @@ +package quic + +import ( + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// Clone clones a Config +func (c *Config) Clone() *Config { + copy := *c + return © +} + +func (c *Config) handshakeTimeout() time.Duration { + return 2 * c.HandshakeIdleTimeout +} + +func (c *Config) maxRetryTokenAge() time.Duration { + return c.handshakeTimeout() +} + +func validateConfig(config *Config) error { + if config == nil { + return nil + } + const maxStreams = 1 << 60 + if config.MaxIncomingStreams > maxStreams { + config.MaxIncomingStreams = maxStreams + } + if config.MaxIncomingUniStreams > maxStreams { + config.MaxIncomingUniStreams = maxStreams + } + if config.MaxStreamReceiveWindow > quicvarint.Max { + config.MaxStreamReceiveWindow = quicvarint.Max + } + if config.MaxConnectionReceiveWindow > quicvarint.Max { + config.MaxConnectionReceiveWindow = quicvarint.Max + } + if config.InitialPacketSize > 0 && config.InitialPacketSize < protocol.MinInitialPacketSize { + config.InitialPacketSize = protocol.MinInitialPacketSize + } + if config.InitialPacketSize > protocol.MaxPacketBufferSize { + config.InitialPacketSize = protocol.MaxPacketBufferSize + } + // check that all QUIC versions are actually supported + for _, v := range config.Versions { + if !protocol.IsValidVersion(v) { + return fmt.Errorf("invalid QUIC version: %s", v) + } + } + return nil +} + +// populateConfig populates fields in the quic.Config with their default values, if none are set +// it may be called with nil +func populateConfig(config *Config) *Config { + if config == nil { + config = &Config{} + } + versions := config.Versions + if len(versions) == 0 { + versions = protocol.SupportedVersions + } + handshakeIdleTimeout := protocol.DefaultHandshakeIdleTimeout + if config.HandshakeIdleTimeout != 0 { + handshakeIdleTimeout = config.HandshakeIdleTimeout + } + idleTimeout := protocol.DefaultIdleTimeout + if config.MaxIdleTimeout != 0 { + idleTimeout = config.MaxIdleTimeout + } + initialStreamReceiveWindow := 
config.InitialStreamReceiveWindow + if initialStreamReceiveWindow == 0 { + initialStreamReceiveWindow = protocol.DefaultInitialMaxStreamData + } + maxStreamReceiveWindow := config.MaxStreamReceiveWindow + if maxStreamReceiveWindow == 0 { + maxStreamReceiveWindow = protocol.DefaultMaxReceiveStreamFlowControlWindow + } + initialConnectionReceiveWindow := config.InitialConnectionReceiveWindow + if initialConnectionReceiveWindow == 0 { + initialConnectionReceiveWindow = protocol.DefaultInitialMaxData + } + maxConnectionReceiveWindow := config.MaxConnectionReceiveWindow + if maxConnectionReceiveWindow == 0 { + maxConnectionReceiveWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow + } + maxIncomingStreams := config.MaxIncomingStreams + if maxIncomingStreams == 0 { + maxIncomingStreams = protocol.DefaultMaxIncomingStreams + } else if maxIncomingStreams < 0 { + maxIncomingStreams = 0 + } + maxIncomingUniStreams := config.MaxIncomingUniStreams + if maxIncomingUniStreams == 0 { + maxIncomingUniStreams = protocol.DefaultMaxIncomingUniStreams + } else if maxIncomingUniStreams < 0 { + maxIncomingUniStreams = 0 + } + initialPacketSize := config.InitialPacketSize + if initialPacketSize == 0 { + initialPacketSize = protocol.InitialPacketSize + } + + return &Config{ + GetConfigForClient: config.GetConfigForClient, + Versions: versions, + HandshakeIdleTimeout: handshakeIdleTimeout, + MaxIdleTimeout: idleTimeout, + KeepAlivePeriod: config.KeepAlivePeriod, + InitialStreamReceiveWindow: initialStreamReceiveWindow, + MaxStreamReceiveWindow: maxStreamReceiveWindow, + InitialConnectionReceiveWindow: initialConnectionReceiveWindow, + MaxConnectionReceiveWindow: maxConnectionReceiveWindow, + AllowConnectionWindowIncrease: config.AllowConnectionWindowIncrease, + MaxIncomingStreams: maxIncomingStreams, + MaxIncomingUniStreams: maxIncomingUniStreams, + TokenStore: config.TokenStore, + EnableDatagrams: config.EnableDatagrams, + InitialPacketSize: initialPacketSize, + DisablePathMTUDiscovery: config.DisablePathMTUDiscovery, + Allow0RTT: config.Allow0RTT, + Tracer: config.Tracer, + } +} diff --git a/vendor/github.com/quic-go/quic-go/conn_id_generator.go b/vendor/github.com/quic-go/quic-go/conn_id_generator.go new file mode 100644 index 00000000..d7be6540 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/conn_id_generator.go @@ -0,0 +1,138 @@ +package quic + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/wire" +) + +type connIDGenerator struct { + generator ConnectionIDGenerator + highestSeq uint64 + + activeSrcConnIDs map[uint64]protocol.ConnectionID + initialClientDestConnID *protocol.ConnectionID // nil for the client + + addConnectionID func(protocol.ConnectionID) + getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken + removeConnectionID func(protocol.ConnectionID) + retireConnectionID func(protocol.ConnectionID) + replaceWithClosed func([]protocol.ConnectionID, []byte) + queueControlFrame func(wire.Frame) +} + +func newConnIDGenerator( + initialConnectionID protocol.ConnectionID, + initialClientDestConnID *protocol.ConnectionID, // nil for the client + addConnectionID func(protocol.ConnectionID), + getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken, + removeConnectionID func(protocol.ConnectionID), + retireConnectionID func(protocol.ConnectionID), + replaceWithClosed func([]protocol.ConnectionID, []byte), + queueControlFrame 
func(wire.Frame), + generator ConnectionIDGenerator, +) *connIDGenerator { + m := &connIDGenerator{ + generator: generator, + activeSrcConnIDs: make(map[uint64]protocol.ConnectionID), + addConnectionID: addConnectionID, + getStatelessResetToken: getStatelessResetToken, + removeConnectionID: removeConnectionID, + retireConnectionID: retireConnectionID, + replaceWithClosed: replaceWithClosed, + queueControlFrame: queueControlFrame, + } + m.activeSrcConnIDs[0] = initialConnectionID + m.initialClientDestConnID = initialClientDestConnID + return m +} + +func (m *connIDGenerator) SetMaxActiveConnIDs(limit uint64) error { + if m.generator.ConnectionIDLen() == 0 { + return nil + } + // The active_connection_id_limit transport parameter is the number of + // connection IDs the peer will store. This limit includes the connection ID + // used during the handshake, and the one sent in the preferred_address + // transport parameter. + // We currently don't send the preferred_address transport parameter, + // so we can issue (limit - 1) connection IDs. + for i := uint64(len(m.activeSrcConnIDs)); i < min(limit, protocol.MaxIssuedConnectionIDs); i++ { + if err := m.issueNewConnID(); err != nil { + return err + } + } + return nil +} + +func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.ConnectionID) error { + if seq > m.highestSeq { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: fmt.Sprintf("retired connection ID %d (highest issued: %d)", seq, m.highestSeq), + } + } + connID, ok := m.activeSrcConnIDs[seq] + // We might already have deleted this connection ID, if this is a duplicate frame. + if !ok { + return nil + } + if connID == sentWithDestConnID { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", seq, connID), + } + } + m.retireConnectionID(connID) + delete(m.activeSrcConnIDs, seq) + // Don't issue a replacement for the initial connection ID. 
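+ // (The initial connection ID was stored at sequence number 0 when this generator was created.)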
+ if seq == 0 { + return nil + } + return m.issueNewConnID() +} + +func (m *connIDGenerator) issueNewConnID() error { + connID, err := m.generator.GenerateConnectionID() + if err != nil { + return err + } + m.activeSrcConnIDs[m.highestSeq+1] = connID + m.addConnectionID(connID) + m.queueControlFrame(&wire.NewConnectionIDFrame{ + SequenceNumber: m.highestSeq + 1, + ConnectionID: connID, + StatelessResetToken: m.getStatelessResetToken(connID), + }) + m.highestSeq++ + return nil +} + +func (m *connIDGenerator) SetHandshakeComplete() { + if m.initialClientDestConnID != nil { + m.retireConnectionID(*m.initialClientDestConnID) + m.initialClientDestConnID = nil + } +} + +func (m *connIDGenerator) RemoveAll() { + if m.initialClientDestConnID != nil { + m.removeConnectionID(*m.initialClientDestConnID) + } + for _, connID := range m.activeSrcConnIDs { + m.removeConnectionID(connID) + } +} + +func (m *connIDGenerator) ReplaceWithClosed(connClose []byte) { + connIDs := make([]protocol.ConnectionID, 0, len(m.activeSrcConnIDs)+1) + if m.initialClientDestConnID != nil { + connIDs = append(connIDs, *m.initialClientDestConnID) + } + for _, connID := range m.activeSrcConnIDs { + connIDs = append(connIDs, connID) + } + m.replaceWithClosed(connIDs, connClose) +} diff --git a/vendor/github.com/quic-go/quic-go/conn_id_manager.go b/vendor/github.com/quic-go/quic-go/conn_id_manager.go new file mode 100644 index 00000000..4aa3f749 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/conn_id_manager.go @@ -0,0 +1,214 @@ +package quic + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" + "github.com/quic-go/quic-go/internal/wire" +) + +type newConnID struct { + SequenceNumber uint64 + ConnectionID protocol.ConnectionID + StatelessResetToken protocol.StatelessResetToken +} + +type connIDManager struct { + queue list.List[newConnID] + + handshakeComplete bool + activeSequenceNumber uint64 + highestRetired uint64 + activeConnectionID protocol.ConnectionID + activeStatelessResetToken *protocol.StatelessResetToken + + // We change the connection ID after sending on average + // protocol.PacketsPerConnectionID packets. The actual value is randomized to + // hide the packet loss rate from on-path observers.
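+ // updateConnectionID draws packetsPerConnectionID uniformly from the range + // [PacketsPerConnectionID/2, PacketsPerConnectionID*3/2).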
+ rand utils.Rand + packetsSinceLastChange uint32 + packetsPerConnectionID uint32 + + addStatelessResetToken func(protocol.StatelessResetToken) + removeStatelessResetToken func(protocol.StatelessResetToken) + queueControlFrame func(wire.Frame) +} + +func newConnIDManager( + initialDestConnID protocol.ConnectionID, + addStatelessResetToken func(protocol.StatelessResetToken), + removeStatelessResetToken func(protocol.StatelessResetToken), + queueControlFrame func(wire.Frame), +) *connIDManager { + return &connIDManager{ + activeConnectionID: initialDestConnID, + addStatelessResetToken: addStatelessResetToken, + removeStatelessResetToken: removeStatelessResetToken, + queueControlFrame: queueControlFrame, + } +} + +func (h *connIDManager) AddFromPreferredAddress(connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error { + return h.addConnectionID(1, connID, resetToken) +} + +func (h *connIDManager) Add(f *wire.NewConnectionIDFrame) error { + if err := h.add(f); err != nil { + return err + } + if h.queue.Len() >= protocol.MaxActiveConnectionIDs { + return &qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError} + } + return nil +} + +func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error { + // If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than the currently active + // connection ID or if it was already retired, send the RETIRE_CONNECTION_ID frame immediately. + if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired { + h.queueControlFrame(&wire.RetireConnectionIDFrame{ + SequenceNumber: f.SequenceNumber, + }) + return nil + } + + // Retire elements in the queue. + // Doesn't retire the active connection ID. + if f.RetirePriorTo > h.highestRetired { + var next *list.Element[newConnID] + for el := h.queue.Front(); el != nil; el = next { + if el.Value.SequenceNumber >= f.RetirePriorTo { + break + } + next = el.Next() + h.queueControlFrame(&wire.RetireConnectionIDFrame{ + SequenceNumber: el.Value.SequenceNumber, + }) + h.queue.Remove(el) + } + h.highestRetired = f.RetirePriorTo + } + + if f.SequenceNumber == h.activeSequenceNumber { + return nil + } + + if err := h.addConnectionID(f.SequenceNumber, f.ConnectionID, f.StatelessResetToken); err != nil { + return err + } + + // Retire the active connection ID, if necessary. + if h.activeSequenceNumber < f.RetirePriorTo { + // The queue is guaranteed to have at least one element at this point. 
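+ // (addConnectionID above either inserted the frame's connection ID into the queue or found it already present.)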
+ h.updateConnectionID() + } + return nil +} + +func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error { + // insert a new element at the end + if h.queue.Len() == 0 || h.queue.Back().Value.SequenceNumber < seq { + h.queue.PushBack(newConnID{ + SequenceNumber: seq, + ConnectionID: connID, + StatelessResetToken: resetToken, + }) + return nil + } + // insert a new element somewhere in the middle + for el := h.queue.Front(); el != nil; el = el.Next() { + if el.Value.SequenceNumber == seq { + if el.Value.ConnectionID != connID { + return fmt.Errorf("received conflicting connection IDs for sequence number %d", seq) + } + if el.Value.StatelessResetToken != resetToken { + return fmt.Errorf("received conflicting stateless reset tokens for sequence number %d", seq) + } + break + } + if el.Value.SequenceNumber > seq { + h.queue.InsertBefore(newConnID{ + SequenceNumber: seq, + ConnectionID: connID, + StatelessResetToken: resetToken, + }, el) + break + } + } + return nil +} + +func (h *connIDManager) updateConnectionID() { + h.queueControlFrame(&wire.RetireConnectionIDFrame{ + SequenceNumber: h.activeSequenceNumber, + }) + h.highestRetired = max(h.highestRetired, h.activeSequenceNumber) + if h.activeStatelessResetToken != nil { + h.removeStatelessResetToken(*h.activeStatelessResetToken) + } + + front := h.queue.Remove(h.queue.Front()) + h.activeSequenceNumber = front.SequenceNumber + h.activeConnectionID = front.ConnectionID + h.activeStatelessResetToken = &front.StatelessResetToken + h.packetsSinceLastChange = 0 + h.packetsPerConnectionID = protocol.PacketsPerConnectionID/2 + uint32(h.rand.Int31n(protocol.PacketsPerConnectionID)) + h.addStatelessResetToken(*h.activeStatelessResetToken) +} + +func (h *connIDManager) Close() { + if h.activeStatelessResetToken != nil { + h.removeStatelessResetToken(*h.activeStatelessResetToken) + } +} + +// is called when the server performs a Retry +// and when the server changes the connection ID in the first Initial sent +func (h *connIDManager) ChangeInitialConnID(newConnID protocol.ConnectionID) { + if h.activeSequenceNumber != 0 { + panic("expected first connection ID to have sequence number 0") + } + h.activeConnectionID = newConnID +} + +// is called when the server provides a stateless reset token in the transport parameters +func (h *connIDManager) SetStatelessResetToken(token protocol.StatelessResetToken) { + if h.activeSequenceNumber != 0 { + panic("expected first connection ID to have sequence number 0") + } + h.activeStatelessResetToken = &token + h.addStatelessResetToken(token) +} + +func (h *connIDManager) SentPacket() { + h.packetsSinceLastChange++ +} + +func (h *connIDManager) shouldUpdateConnID() bool { + if !h.handshakeComplete { + return false + } + // initiate the first change as early as possible (after handshake completion) + if h.queue.Len() > 0 && h.activeSequenceNumber == 0 { + return true + } + // For later changes, only change if + // 1. The queue of connection IDs is filled more than 50%. + // 2. 
We sent at least PacketsPerConnectionID packets + return 2*h.queue.Len() >= protocol.MaxActiveConnectionIDs && + h.packetsSinceLastChange >= h.packetsPerConnectionID +} + +func (h *connIDManager) Get() protocol.ConnectionID { + if h.shouldUpdateConnID() { + h.updateConnectionID() + } + return h.activeConnectionID +} + +func (h *connIDManager) SetHandshakeComplete() { + h.handshakeComplete = true +} diff --git a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go new file mode 100644 index 00000000..1411a77b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/connection.go @@ -0,0 +1,2333 @@ +package quic + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "reflect" + "sync" + "sync/atomic" + "time" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +type unpacker interface { + UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error) + UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) +} + +type streamManager interface { + GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error) + GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) + OpenStream() (Stream, error) + OpenUniStream() (SendStream, error) + OpenStreamSync(context.Context) (Stream, error) + OpenUniStreamSync(context.Context) (SendStream, error) + AcceptStream(context.Context) (Stream, error) + AcceptUniStream(context.Context) (ReceiveStream, error) + DeleteStream(protocol.StreamID) error + UpdateLimits(*wire.TransportParameters) + HandleMaxStreamsFrame(*wire.MaxStreamsFrame) + CloseWithError(error) + ResetFor0RTT() + UseResetMaps() +} + +type cryptoStreamHandler interface { + StartHandshake(context.Context) error + ChangeConnectionID(protocol.ConnectionID) + SetLargest1RTTAcked(protocol.PacketNumber) error + SetHandshakeConfirmed() + GetSessionTicket() ([]byte, error) + NextEvent() handshake.Event + DiscardInitialKeys() + HandleMessage([]byte, protocol.EncryptionLevel) error + io.Closer + ConnectionState() handshake.ConnectionState +} + +type receivedPacket struct { + buffer *packetBuffer + + remoteAddr net.Addr + rcvTime time.Time + data []byte + + ecn protocol.ECN + + info packetInfo // only valid if the contained IP address is valid +} + +func (p *receivedPacket) Size() protocol.ByteCount { return protocol.ByteCount(len(p.data)) } + +func (p *receivedPacket) Clone() *receivedPacket { + return &receivedPacket{ + remoteAddr: p.remoteAddr, + rcvTime: p.rcvTime, + data: p.data, + buffer: p.buffer, + ecn: p.ecn, + info: p.info, + } +} + +type connRunner interface { + Add(protocol.ConnectionID, packetHandler) bool + GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken + Retire(protocol.ConnectionID) + Remove(protocol.ConnectionID) + ReplaceWithClosed([]protocol.ConnectionID, []byte) + AddResetToken(protocol.StatelessResetToken, packetHandler) + RemoveResetToken(protocol.StatelessResetToken) +} + +type closeError struct { + err error + remote bool + immediate bool +} + +type errCloseForRecreating struct { + nextPacketNumber protocol.PacketNumber + nextVersion protocol.Version +} + +func (e 
*errCloseForRecreating) Error() string { + return "closing connection in order to recreate it" +} + +var connTracingID atomic.Uint64 // to be accessed atomically +func nextConnTracingID() ConnectionTracingID { return ConnectionTracingID(connTracingID.Add(1)) } + +// A Connection is a QUIC connection +type connection struct { + // Destination connection ID used during the handshake. + // Used to check source connection ID on incoming packets. + handshakeDestConnID protocol.ConnectionID + // Set for the client. Destination connection ID used on the first Initial sent. + origDestConnID protocol.ConnectionID + retrySrcConnID *protocol.ConnectionID // only set for the client (and if a Retry was performed) + + srcConnIDLen int + + perspective protocol.Perspective + version protocol.Version + config *Config + + conn sendConn + sendQueue sender + + streamsMap streamManager + connIDManager *connIDManager + connIDGenerator *connIDGenerator + + rttStats *utils.RTTStats + + cryptoStreamManager *cryptoStreamManager + sentPacketHandler ackhandler.SentPacketHandler + receivedPacketHandler ackhandler.ReceivedPacketHandler + retransmissionQueue *retransmissionQueue + framer *framer + connFlowController flowcontrol.ConnectionFlowController + tokenStoreKey string // only set for the client + tokenGenerator *handshake.TokenGenerator // only set for the server + + unpacker unpacker + frameParser wire.FrameParser + packer packer + mtuDiscoverer mtuDiscoverer // initialized when the transport parameters are received + + maxPayloadSizeEstimate atomic.Uint32 + + initialStream *cryptoStream + handshakeStream *cryptoStream + oneRTTStream *cryptoStream // only set for the server + cryptoStreamHandler cryptoStreamHandler + + receivedPackets chan receivedPacket + sendingScheduled chan struct{} + + closeOnce sync.Once + // closeChan is used to notify the run loop that it should terminate + closeChan chan closeError + + ctx context.Context + ctxCancel context.CancelCauseFunc + handshakeCompleteChan chan struct{} + + undecryptablePackets []receivedPacket // undecryptable packets, waiting for a change in encryption level + undecryptablePacketsToProcess []receivedPacket + + earlyConnReadyChan chan struct{} + sentFirstPacket bool + droppedInitialKeys bool + handshakeComplete bool + handshakeConfirmed bool + + receivedRetry bool + versionNegotiated bool + receivedFirstPacket bool + + // the minimum of the max_idle_timeout values advertised by both endpoints + idleTimeout time.Duration + creationTime time.Time + // The idle timeout is set based on the max of the time we received the last packet... + lastPacketReceivedTime time.Time + // ... and the time we sent a new ack-eliciting packet after receiving a packet. + firstAckElicitingPacketAfterIdleSentTime time.Time + // pacingDeadline is the time when the next packet should be sent + pacingDeadline time.Time + + peerParams *wire.TransportParameters + + timer connectionTimer + // keepAlivePingSent stores whether a keep alive PING is in flight. + // It is reset as soon as we receive a packet from the peer. 
+ keepAlivePingSent bool + keepAliveInterval time.Duration + + datagramQueue *datagramQueue + + connStateMutex sync.Mutex + connState ConnectionState + + logID string + tracer *logging.ConnectionTracer + logger utils.Logger +} + +var ( + _ Connection = &connection{} + _ EarlyConnection = &connection{} + _ streamSender = &connection{} +) + +var newConnection = func( + ctx context.Context, + ctxCancel context.CancelCauseFunc, + conn sendConn, + runner connRunner, + origDestConnID protocol.ConnectionID, + retrySrcConnID *protocol.ConnectionID, + clientDestConnID protocol.ConnectionID, + destConnID protocol.ConnectionID, + srcConnID protocol.ConnectionID, + connIDGenerator ConnectionIDGenerator, + statelessResetToken protocol.StatelessResetToken, + conf *Config, + tlsConf *tls.Config, + tokenGenerator *handshake.TokenGenerator, + clientAddressValidated bool, + tracer *logging.ConnectionTracer, + logger utils.Logger, + v protocol.Version, +) quicConn { + s := &connection{ + ctx: ctx, + ctxCancel: ctxCancel, + conn: conn, + config: conf, + handshakeDestConnID: destConnID, + srcConnIDLen: srcConnID.Len(), + tokenGenerator: tokenGenerator, + oneRTTStream: newCryptoStream(), + perspective: protocol.PerspectiveServer, + tracer: tracer, + logger: logger, + version: v, + } + if origDestConnID.Len() > 0 { + s.logID = origDestConnID.String() + } else { + s.logID = destConnID.String() + } + s.connIDManager = newConnIDManager( + destConnID, + func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) }, + runner.RemoveResetToken, + s.queueControlFrame, + ) + s.connIDGenerator = newConnIDGenerator( + srcConnID, + &clientDestConnID, + func(connID protocol.ConnectionID) { runner.Add(connID, s) }, + runner.GetStatelessResetToken, + runner.Remove, + runner.Retire, + runner.ReplaceWithClosed, + s.queueControlFrame, + connIDGenerator, + ) + s.preSetup() + s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler( + 0, + protocol.ByteCount(s.config.InitialPacketSize), + s.rttStats, + clientAddressValidated, + s.conn.capabilities().ECN, + s.perspective, + s.tracer, + s.logger, + ) + s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) + params := &wire.TransportParameters{ + InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow), + InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow), + InitialMaxStreamDataUni: protocol.ByteCount(s.config.InitialStreamReceiveWindow), + InitialMaxData: protocol.ByteCount(s.config.InitialConnectionReceiveWindow), + MaxIdleTimeout: s.config.MaxIdleTimeout, + MaxBidiStreamNum: protocol.StreamNum(s.config.MaxIncomingStreams), + MaxUniStreamNum: protocol.StreamNum(s.config.MaxIncomingUniStreams), + MaxAckDelay: protocol.MaxAckDelayInclGranularity, + AckDelayExponent: protocol.AckDelayExponent, + MaxUDPPayloadSize: protocol.MaxPacketBufferSize, + DisableActiveMigration: true, + StatelessResetToken: &statelessResetToken, + OriginalDestinationConnectionID: origDestConnID, + // For interoperability with quic-go versions before May 2023, this value must be set to a value + // different from protocol.DefaultActiveConnectionIDLimit. + // If set to the default value, it will be omitted from the transport parameters, which will make + // old quic-go versions interpret it as 0, instead of the default value of 2. + // See https://github.com/quic-go/quic-go/pull/3806. 
+ ActiveConnectionIDLimit: protocol.MaxActiveConnectionIDs, + InitialSourceConnectionID: srcConnID, + RetrySourceConnectionID: retrySrcConnID, + } + if s.config.EnableDatagrams { + params.MaxDatagramFrameSize = wire.MaxDatagramSize + } else { + params.MaxDatagramFrameSize = protocol.InvalidByteCount + } + if s.tracer != nil && s.tracer.SentTransportParameters != nil { + s.tracer.SentTransportParameters(params) + } + cs := handshake.NewCryptoSetupServer( + clientDestConnID, + conn.LocalAddr(), + conn.RemoteAddr(), + params, + tlsConf, + conf.Allow0RTT, + s.rttStats, + tracer, + logger, + s.version, + ) + s.cryptoStreamHandler = cs + s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) + s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, s.oneRTTStream) + return s +} + +// declare this as a variable, such that we can mock it in the tests +var newClientConnection = func( + ctx context.Context, + conn sendConn, + runner connRunner, + destConnID protocol.ConnectionID, + srcConnID protocol.ConnectionID, + connIDGenerator ConnectionIDGenerator, + conf *Config, + tlsConf *tls.Config, + initialPacketNumber protocol.PacketNumber, + enable0RTT bool, + hasNegotiatedVersion bool, + tracer *logging.ConnectionTracer, + logger utils.Logger, + v protocol.Version, +) quicConn { + s := &connection{ + conn: conn, + config: conf, + origDestConnID: destConnID, + handshakeDestConnID: destConnID, + srcConnIDLen: srcConnID.Len(), + perspective: protocol.PerspectiveClient, + logID: destConnID.String(), + logger: logger, + tracer: tracer, + versionNegotiated: hasNegotiatedVersion, + version: v, + } + s.connIDManager = newConnIDManager( + destConnID, + func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) }, + runner.RemoveResetToken, + s.queueControlFrame, + ) + s.connIDGenerator = newConnIDGenerator( + srcConnID, + nil, + func(connID protocol.ConnectionID) { runner.Add(connID, s) }, + runner.GetStatelessResetToken, + runner.Remove, + runner.Retire, + runner.ReplaceWithClosed, + s.queueControlFrame, + connIDGenerator, + ) + s.ctx, s.ctxCancel = context.WithCancelCause(ctx) + s.preSetup() + s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler( + initialPacketNumber, + protocol.ByteCount(s.config.InitialPacketSize), + s.rttStats, + false, // has no effect + s.conn.capabilities().ECN, + s.perspective, + s.tracer, + s.logger, + ) + s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) + oneRTTStream := newCryptoStream() + params := &wire.TransportParameters{ + InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow), + InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow), + InitialMaxStreamDataUni: protocol.ByteCount(s.config.InitialStreamReceiveWindow), + InitialMaxData: protocol.ByteCount(s.config.InitialConnectionReceiveWindow), + MaxIdleTimeout: s.config.MaxIdleTimeout, + MaxBidiStreamNum: protocol.StreamNum(s.config.MaxIncomingStreams), + MaxUniStreamNum: protocol.StreamNum(s.config.MaxIncomingUniStreams), + MaxAckDelay: protocol.MaxAckDelayInclGranularity, + MaxUDPPayloadSize: protocol.MaxPacketBufferSize, + AckDelayExponent: protocol.AckDelayExponent, + DisableActiveMigration: true, + // For interoperability with
quic-go versions before May 2023, this value must be set to a value + // different from protocol.DefaultActiveConnectionIDLimit. + // If set to the default value, it will be omitted from the transport parameters, which will make + // old quic-go versions interpret it as 0, instead of the default value of 2. + // See https://github.com/quic-go/quic-go/pull/3806. + ActiveConnectionIDLimit: protocol.MaxActiveConnectionIDs, + InitialSourceConnectionID: srcConnID, + } + if s.config.EnableDatagrams { + params.MaxDatagramFrameSize = wire.MaxDatagramSize + } else { + params.MaxDatagramFrameSize = protocol.InvalidByteCount + } + if s.tracer != nil && s.tracer.SentTransportParameters != nil { + s.tracer.SentTransportParameters(params) + } + cs := handshake.NewCryptoSetupClient( + destConnID, + params, + tlsConf, + enable0RTT, + s.rttStats, + tracer, + logger, + s.version, + ) + s.cryptoStreamHandler = cs + s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, oneRTTStream) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) + s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) + if len(tlsConf.ServerName) > 0 { + s.tokenStoreKey = tlsConf.ServerName + } else { + s.tokenStoreKey = conn.RemoteAddr().String() + } + if s.config.TokenStore != nil { + if token := s.config.TokenStore.Pop(s.tokenStoreKey); token != nil { + s.packer.SetToken(token.data) + } + } + return s +} + +func (s *connection) preSetup() { + s.initialStream = newCryptoStream() + s.handshakeStream = newCryptoStream() + s.sendQueue = newSendQueue(s.conn) + s.retransmissionQueue = newRetransmissionQueue() + s.frameParser = *wire.NewFrameParser(s.config.EnableDatagrams) + s.rttStats = &utils.RTTStats{} + s.connFlowController = flowcontrol.NewConnectionFlowController( + protocol.ByteCount(s.config.InitialConnectionReceiveWindow), + protocol.ByteCount(s.config.MaxConnectionReceiveWindow), + func(size protocol.ByteCount) bool { + if s.config.AllowConnectionWindowIncrease == nil { + return true + } + return s.config.AllowConnectionWindowIncrease(s, uint64(size)) + }, + s.rttStats, + s.logger, + ) + s.earlyConnReadyChan = make(chan struct{}) + s.streamsMap = newStreamsMap( + s.ctx, + s, + s.queueControlFrame, + s.newFlowController, + uint64(s.config.MaxIncomingStreams), + uint64(s.config.MaxIncomingUniStreams), + s.perspective, + ) + s.framer = newFramer() + s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets) + s.closeChan = make(chan closeError, 1) + s.sendingScheduled = make(chan struct{}, 1) + s.handshakeCompleteChan = make(chan struct{}) + + now := time.Now() + s.lastPacketReceivedTime = now + s.creationTime = now + + s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger) + s.connState.Version = s.version +} + +// run the connection main loop +func (s *connection) run() error { + var closeErr closeError + defer func() { s.ctxCancel(closeErr.err) }() + + s.timer = *newTimer() + + if err := s.cryptoStreamHandler.StartHandshake(s.ctx); err != nil { + return err + } + if err := s.handleHandshakeEvents(); err != nil { + return err + } + go func() { + if err := s.sendQueue.Run(); err != nil { + s.destroyImpl(err) + } + }() + + if s.perspective == protocol.PerspectiveClient { + s.scheduleSending() // so the ClientHello actually gets sent + } + + var sendQueueAvailable <-chan struct{} + +runLoop: + for { + if 
s.framer.QueuedTooManyControlFrames() { + s.closeLocal(&qerr.TransportError{ErrorCode: InternalError}) + } + // Close immediately if requested + select { + case closeErr = <-s.closeChan: + break runLoop + default: + } + + s.maybeResetTimer() + + var processedUndecryptablePacket bool + if len(s.undecryptablePacketsToProcess) > 0 { + queue := s.undecryptablePacketsToProcess + s.undecryptablePacketsToProcess = nil + for _, p := range queue { + if processed := s.handlePacketImpl(p); processed { + processedUndecryptablePacket = true + } + // Don't set timers and send packets if the packet made us close the connection. + select { + case closeErr = <-s.closeChan: + break runLoop + default: + } + } + } + // If we processed any undecryptable packets, jump to the resetting of the timers directly. + if !processedUndecryptablePacket { + select { + case closeErr = <-s.closeChan: + break runLoop + case <-s.timer.Chan(): + s.timer.SetRead() + // We do all the interesting stuff after the switch statement, so + // nothing to see here. + case <-s.sendingScheduled: + // We do all the interesting stuff after the switch statement, so + // nothing to see here. + case <-sendQueueAvailable: + case firstPacket := <-s.receivedPackets: + wasProcessed := s.handlePacketImpl(firstPacket) + // Don't set timers and send packets if the packet made us close the connection. + select { + case closeErr = <-s.closeChan: + break runLoop + default: + } + if s.handshakeComplete { + // Now process all packets in the receivedPackets channel. + // Limit the number of packets to the length of the receivedPackets channel, + // so we eventually get a chance to send out an ACK when receiving a lot of packets. + numPackets := len(s.receivedPackets) + receiveLoop: + for i := 0; i < numPackets; i++ { + select { + case p := <-s.receivedPackets: + if processed := s.handlePacketImpl(p); processed { + wasProcessed = true + } + select { + case closeErr = <-s.closeChan: + break runLoop + default: + } + default: + break receiveLoop + } + } + } + // Only reset the timers if this packet was actually processed. + // This avoids modifying any state when handling undecryptable packets, + // which could be injected by an attacker. + if !wasProcessed { + continue + } + } + } + + now := time.Now() + if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) { + // This could cause packets to be retransmitted. + // Check it before trying to send packets. + if err := s.sentPacketHandler.OnLossDetectionTimeout(); err != nil { + s.closeLocal(err) + } + } + + if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() && !now.Before(keepAliveTime) { + // send a PING frame since there is no activity in the connection + s.logger.Debugf("Sending a keep-alive PING to keep the connection alive.") + s.framer.QueueControlFrame(&wire.PingFrame{}) + s.keepAlivePingSent = true + } else if !s.handshakeComplete && now.Sub(s.creationTime) >= s.config.handshakeTimeout() { + s.destroyImpl(qerr.ErrHandshakeTimeout) + continue + } else { + idleTimeoutStartTime := s.idleTimeoutStartTime() + if (!s.handshakeComplete && now.Sub(idleTimeoutStartTime) >= s.config.HandshakeIdleTimeout) || + (s.handshakeComplete && now.After(s.nextIdleTimeoutTime())) { + s.destroyImpl(qerr.ErrIdleTimeout) + continue + } + } + + if s.sendQueue.WouldBlock() { + // The send queue is still busy sending out packets. + // Wait until there's space to enqueue new packets. 
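// Editor's aside: a minimal sketch (hypothetical names, not part of the vendored
// file) of the backpressure pattern used here. The run loop only selects on the
// availability channel while the send queue is saturated; a nil channel disables
// that select case:
//
//	var available <-chan struct{} // nil: the select case stays disabled
//	if queue.WouldBlock() {
//		available = queue.Available() // re-armed; the run loop waits on it
//	} else {
//		available = nil
//	}
//
// which is what the assignments below do.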
+ sendQueueAvailable = s.sendQueue.Available() + continue + } + if err := s.triggerSending(now); err != nil { + s.closeLocal(err) + } + if s.sendQueue.WouldBlock() { + sendQueueAvailable = s.sendQueue.Available() + } else { + sendQueueAvailable = nil + } + } + + s.cryptoStreamHandler.Close() + s.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE + s.handleCloseError(&closeErr) + if s.tracer != nil && s.tracer.Close != nil { + if e := (&errCloseForRecreating{}); !errors.As(closeErr.err, &e) { + s.tracer.Close() + } + } + s.logger.Infof("Connection %s closed.", s.logID) + s.timer.Stop() + return closeErr.err +} + +// blocks until the early connection can be used +func (s *connection) earlyConnReady() <-chan struct{} { + return s.earlyConnReadyChan +} + +func (s *connection) HandshakeComplete() <-chan struct{} { + return s.handshakeCompleteChan +} + +func (s *connection) Context() context.Context { + return s.ctx +} + +func (s *connection) supportsDatagrams() bool { + return s.peerParams.MaxDatagramFrameSize > 0 +} + +func (s *connection) ConnectionState() ConnectionState { + s.connStateMutex.Lock() + defer s.connStateMutex.Unlock() + cs := s.cryptoStreamHandler.ConnectionState() + s.connState.TLS = cs.ConnectionState + s.connState.Used0RTT = cs.Used0RTT + s.connState.GSO = s.conn.capabilities().GSO + return s.connState +} + +// Time when the connection should time out +func (s *connection) nextIdleTimeoutTime() time.Time { + idleTimeout := max(s.idleTimeout, s.rttStats.PTO(true)*3) + return s.idleTimeoutStartTime().Add(idleTimeout) +} + +// Time when the next keep-alive packet should be sent. +// It returns a zero time if no keep-alive should be sent. +func (s *connection) nextKeepAliveTime() time.Time { + if s.config.KeepAlivePeriod == 0 || s.keepAlivePingSent || !s.firstAckElicitingPacketAfterIdleSentTime.IsZero() { + return time.Time{} + } + keepAliveInterval := max(s.keepAliveInterval, s.rttStats.PTO(true)*3/2) + return s.lastPacketReceivedTime.Add(keepAliveInterval) +} + +func (s *connection) maybeResetTimer() { + var deadline time.Time + if !s.handshakeComplete { + deadline = s.creationTime.Add(s.config.handshakeTimeout()) + if t := s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout); t.Before(deadline) { + deadline = t + } + } else { + if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() { + deadline = keepAliveTime + } else { + deadline = s.nextIdleTimeoutTime() + } + } + + s.timer.SetTimer( + deadline, + s.receivedPacketHandler.GetAlarmTimeout(), + s.sentPacketHandler.GetLossDetectionTimeout(), + s.pacingDeadline, + ) +} + +func (s *connection) idleTimeoutStartTime() time.Time { + startTime := s.lastPacketReceivedTime + if t := s.firstAckElicitingPacketAfterIdleSentTime; t.After(startTime) { + startTime = t + } + return startTime +} + +func (s *connection) handleHandshakeComplete() error { + defer close(s.handshakeCompleteChan) + // Once the handshake completes, we have derived 1-RTT keys. + // There's no point in queueing undecryptable packets for later decryption anymore. + s.undecryptablePackets = nil + + s.connIDManager.SetHandshakeComplete() + s.connIDGenerator.SetHandshakeComplete() + + if s.tracer != nil && s.tracer.ChoseALPN != nil { + s.tracer.ChoseALPN(s.cryptoStreamHandler.ConnectionState().NegotiatedProtocol) + } + + // The server applies transport parameters right away, but the client side has to wait for handshake completion. 
+ // During a 0-RTT connection, the client is only allowed to use the new transport parameters for 1-RTT packets. + if s.perspective == protocol.PerspectiveClient { + s.applyTransportParameters() + return nil + } + + // All these only apply to the server side. + if err := s.handleHandshakeConfirmed(); err != nil { + return err + } + + ticket, err := s.cryptoStreamHandler.GetSessionTicket() + if err != nil { + return err + } + if ticket != nil { // may be nil if session tickets are disabled via tls.Config.SessionTicketsDisabled + s.oneRTTStream.Write(ticket) + for s.oneRTTStream.HasData() { + s.queueControlFrame(s.oneRTTStream.PopCryptoFrame(protocol.MaxPostHandshakeCryptoFrameSize)) + } + } + token, err := s.tokenGenerator.NewToken(s.conn.RemoteAddr()) + if err != nil { + return err + } + s.queueControlFrame(&wire.NewTokenFrame{Token: token}) + s.queueControlFrame(&wire.HandshakeDoneFrame{}) + return nil +} + +func (s *connection) handleHandshakeConfirmed() error { + if err := s.dropEncryptionLevel(protocol.EncryptionHandshake); err != nil { + return err + } + + s.handshakeConfirmed = true + s.sentPacketHandler.SetHandshakeConfirmed() + s.cryptoStreamHandler.SetHandshakeConfirmed() + + if !s.config.DisablePathMTUDiscovery && s.conn.capabilities().DF { + s.mtuDiscoverer.Start() + } + return nil +} + +func (s *connection) handlePacketImpl(rp receivedPacket) bool { + s.sentPacketHandler.ReceivedBytes(rp.Size()) + + if wire.IsVersionNegotiationPacket(rp.data) { + s.handleVersionNegotiationPacket(rp) + return false + } + + var counter uint8 + var lastConnID protocol.ConnectionID + var processed bool + data := rp.data + p := rp + for len(data) > 0 { + if counter > 0 { + p = *(p.Clone()) + p.data = data + + destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen) + if err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError) + } + s.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", err) + break + } + if destConnID != lastConnID { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID) + } + s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", destConnID, lastConnID) + break + } + } + + if wire.IsLongHeaderPacket(p.data[0]) { + hdr, packetData, rest, err := wire.ParsePacket(p.data) + if err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + dropReason := logging.PacketDropHeaderParseError + if err == wire.ErrUnsupportedVersion { + dropReason = logging.PacketDropUnsupportedVersion + } + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), dropReason) + } + s.logger.Debugf("error parsing packet: %s", err) + break + } + lastConnID = hdr.DestConnectionID + + if hdr.Version != s.version { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion) + } + s.logger.Debugf("Dropping packet with version %x. 
Expected %x.", hdr.Version, s.version) + break + } + + if counter > 0 { + p.buffer.Split() + } + counter++ + + // only log if this actually a coalesced packet + if s.logger.Debug() && (counter > 1 || len(rest) > 0) { + s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest)) + } + + p.data = packetData + + if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed { + processed = true + } + data = rest + } else { + if counter > 0 { + p.buffer.Split() + } + if wasProcessed := s.handleShortHeaderPacket(p); wasProcessed { + processed = true + } + break + } + } + + p.buffer.MaybeRelease() + return processed +} + +func (s *connection) handleShortHeaderPacket(p receivedPacket) bool { + var wasQueued bool + + defer func() { + // Put back the packet buffer if the packet wasn't queued for later decryption. + if !wasQueued { + p.buffer.Decrement() + } + }() + + destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen) + if err != nil { + s.tracer.DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, protocol.ByteCount(len(p.data)), logging.PacketDropHeaderParseError) + return false + } + pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data) + if err != nil { + wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT) + return false + } + + if s.logger.Debug() { + s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID) + wire.LogShortHeader(s.logger, destConnID, pn, pnLen, keyPhase) + } + + if s.receivedPacketHandler.IsPotentiallyDuplicate(pn, protocol.Encryption1RTT) { + s.logger.Debugf("Dropping (potentially) duplicate packet.") + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketType1RTT, pn, p.Size(), logging.PacketDropDuplicate) + } + return false + } + + var log func([]logging.Frame) + if s.tracer != nil && s.tracer.ReceivedShortHeaderPacket != nil { + log = func(frames []logging.Frame) { + s.tracer.ReceivedShortHeaderPacket( + &logging.ShortHeader{ + DestConnectionID: destConnID, + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: keyPhase, + }, + p.Size(), + p.ecn, + frames, + ) + } + } + if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil { + s.closeLocal(err) + return false + } + return true +} + +func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ { + var wasQueued bool + + defer func() { + // Put back the packet buffer if the packet wasn't queued for later decryption. + if !wasQueued { + p.buffer.Decrement() + } + }() + + if hdr.Type == protocol.PacketTypeRetry { + return s.handleRetryPacket(hdr, p.data, p.rcvTime) + } + + // The server can change the source connection ID with the first Handshake packet. + // After this, all packets with a different source connection have to be ignored. 
+ if s.receivedFirstPacket && hdr.Type == protocol.PacketTypeInitial && hdr.SrcConnectionID != s.handshakeDestConnID { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeInitial, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnknownConnectionID) + } + s.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", p.Size(), hdr.SrcConnectionID, s.handshakeDestConnID) + return false + } + // drop 0-RTT packets, if we are a client + if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable) + } + return false + } + + packet, err := s.unpacker.UnpackLongHeader(hdr, p.data) + if err != nil { + wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) + return false + } + + if s.logger.Debug() { + s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.hdr.PacketNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel) + packet.hdr.Log(s.logger) + } + + if pn := packet.hdr.PacketNumber; s.receivedPacketHandler.IsPotentiallyDuplicate(pn, packet.encryptionLevel) { + s.logger.Debugf("Dropping (potentially) duplicate packet.") + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), pn, p.Size(), logging.PacketDropDuplicate) + } + return false + } + + if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil { + s.closeLocal(err) + return false + } + return true +} + +func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool) { + switch err { + case handshake.ErrKeysDropped: + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable) + } + s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size()) + case handshake.ErrKeysNotYetAvailable: + // Sealer for this encryption level not yet available. + // Try again later. + s.tryQueueingUndecryptablePacket(p, pt) + return true + case wire.ErrInvalidReservedBits: + s.closeLocal(&qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: err.Error(), + }) + case handshake.ErrDecryptionFailed: + // This might be a packet injected by an attacker. Drop it. + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropPayloadDecryptError) + } + s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err) + default: + var headerErr *headerParseError + if errors.As(err, &headerErr) { + // This might be a packet injected by an attacker. Drop it. + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError) + } + s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err) + } else { + // This is an error returned by the AEAD (other than ErrDecryptionFailed). + // For example, a PROTOCOL_VIOLATION due to key updates. 
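// Editor's aside: the dispatch above condenses to this decision table
// (paraphrased from the switch, not exhaustive):
//
//	ErrKeysNotYetAvailable -> queue the packet for later decryption
//	ErrKeysDropped         -> drop silently (keys are already gone)
//	ErrDecryptionFailed    -> drop silently (possible attacker injection)
//	ErrInvalidReservedBits -> close with PROTOCOL_VIOLATION
//	header parse error     -> drop silently (possible attacker injection)
//	any other AEAD error   -> close the connection, as done on the next line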
+ s.closeLocal(err) + } + } + return false +} + +func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime time.Time) bool /* was this a valid Retry */ { + if s.perspective == protocol.PerspectiveServer { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket) + } + s.logger.Debugf("Ignoring Retry.") + return false + } + if s.receivedFirstPacket { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket) + } + s.logger.Debugf("Ignoring Retry, since we already received a packet.") + return false + } + destConnID := s.connIDManager.Get() + if hdr.SrcConnectionID == destConnID { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket) + } + s.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.") + return false + } + // If a token is already set, this means that we already received a Retry from the server. + // Ignore this Retry packet. + if s.receivedRetry { + s.logger.Debugf("Ignoring Retry, since a Retry was already received.") + return false + } + + tag := handshake.GetRetryIntegrityTag(data[:len(data)-16], destConnID, hdr.Version) + if !bytes.Equal(data[len(data)-16:], tag[:]) { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropPayloadDecryptError) + } + s.logger.Debugf("Ignoring spoofed Retry. 
Integrity Tag doesn't match.") + return false + } + + if s.logger.Debug() { + s.logger.Debugf("<- Received Retry:") + (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger) + s.logger.Debugf("Switching destination connection ID to: %s", hdr.SrcConnectionID) + } + if s.tracer != nil && s.tracer.ReceivedRetry != nil { + s.tracer.ReceivedRetry(hdr) + } + newDestConnID := hdr.SrcConnectionID + s.receivedRetry = true + if err := s.sentPacketHandler.ResetForRetry(rcvTime); err != nil { + s.closeLocal(err) + return false + } + s.handshakeDestConnID = newDestConnID + s.retrySrcConnID = &newDestConnID + s.cryptoStreamHandler.ChangeConnectionID(newDestConnID) + s.packer.SetToken(hdr.Token) + s.connIDManager.ChangeInitialConnID(newDestConnID) + s.scheduleSending() + return true +} + +func (s *connection) handleVersionNegotiationPacket(p receivedPacket) { + if s.perspective == protocol.PerspectiveServer || // servers never receive version negotiation packets + s.receivedFirstPacket || s.versionNegotiated { // ignore delayed / duplicated version negotiation packets + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket) + } + return + } + + src, dest, supportedVersions, err := wire.ParseVersionNegotiationPacket(p.data) + if err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError) + } + s.logger.Debugf("Error parsing Version Negotiation packet: %s", err) + return + } + + for _, v := range supportedVersions { + if v == s.version { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedVersion) + } + // The Version Negotiation packet contains the version that we offered. + // This might be a packet sent by an attacker, or it was corrupted. + return + } + } + + s.logger.Infof("Received a Version Negotiation packet. 
Supported Versions: %s", supportedVersions) + if s.tracer != nil && s.tracer.ReceivedVersionNegotiationPacket != nil { + s.tracer.ReceivedVersionNegotiationPacket(dest, src, supportedVersions) + } + newVersion, ok := protocol.ChooseSupportedVersion(s.config.Versions, supportedVersions) + if !ok { + s.destroyImpl(&VersionNegotiationError{ + Ours: s.config.Versions, + Theirs: supportedVersions, + }) + s.logger.Infof("No compatible QUIC version found.") + return + } + if s.tracer != nil && s.tracer.NegotiatedVersion != nil { + s.tracer.NegotiatedVersion(newVersion, s.config.Versions, supportedVersions) + } + + s.logger.Infof("Switching to QUIC version %s.", newVersion) + nextPN, _ := s.sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial) + s.destroyImpl(&errCloseForRecreating{ + nextPacketNumber: nextPN, + nextVersion: newVersion, + }) +} + +func (s *connection) handleUnpackedLongHeaderPacket( + packet *unpackedPacket, + ecn protocol.ECN, + rcvTime time.Time, + packetSize protocol.ByteCount, // only for logging +) error { + if !s.receivedFirstPacket { + s.receivedFirstPacket = true + if !s.versionNegotiated && s.tracer != nil && s.tracer.NegotiatedVersion != nil { + var clientVersions, serverVersions []protocol.Version + switch s.perspective { + case protocol.PerspectiveClient: + clientVersions = s.config.Versions + case protocol.PerspectiveServer: + serverVersions = s.config.Versions + } + s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions) + } + // The server can change the source connection ID with the first Handshake packet. + if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID { + cid := packet.hdr.SrcConnectionID + s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid) + s.handshakeDestConnID = cid + s.connIDManager.ChangeInitialConnID(cid) + } + // We create the connection as soon as we receive the first packet from the client. + // We do that before authenticating the packet. + // That means that if the source connection ID was corrupted, + // we might have created a connection with an incorrect source connection ID. + // Once we authenticate the first packet, we need to update it. + if s.perspective == protocol.PerspectiveServer { + if packet.hdr.SrcConnectionID != s.handshakeDestConnID { + s.handshakeDestConnID = packet.hdr.SrcConnectionID + s.connIDManager.ChangeInitialConnID(packet.hdr.SrcConnectionID) + } + if s.tracer != nil && s.tracer.StartedConnection != nil { + s.tracer.StartedConnection( + s.conn.LocalAddr(), + s.conn.RemoteAddr(), + packet.hdr.SrcConnectionID, + packet.hdr.DestConnectionID, + ) + } + } + } + + if s.perspective == protocol.PerspectiveServer && packet.encryptionLevel == protocol.EncryptionHandshake && + !s.droppedInitialKeys { + // On the server side, Initial keys are dropped as soon as the first Handshake packet is received. + // See Section 4.9.1 of RFC 9001. 
+ if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil { + return err + } + } + + s.lastPacketReceivedTime = rcvTime + s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} + s.keepAlivePingSent = false + + var log func([]logging.Frame) + if s.tracer != nil && s.tracer.ReceivedLongHeaderPacket != nil { + log = func(frames []logging.Frame) { + s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, ecn, frames) + } + } + isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log) + if err != nil { + return err + } + return s.receivedPacketHandler.ReceivedPacket(packet.hdr.PacketNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting) +} + +func (s *connection) handleUnpackedShortHeaderPacket( + destConnID protocol.ConnectionID, + pn protocol.PacketNumber, + data []byte, + ecn protocol.ECN, + rcvTime time.Time, + log func([]logging.Frame), +) error { + s.lastPacketReceivedTime = rcvTime + s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} + s.keepAlivePingSent = false + + isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log) + if err != nil { + return err + } + return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting) +} + +func (s *connection) handleFrames( + data []byte, + destConnID protocol.ConnectionID, + encLevel protocol.EncryptionLevel, + log func([]logging.Frame), +) (isAckEliciting bool, _ error) { + // Only used for tracing. + // If we're not tracing, this slice will always remain empty. + var frames []logging.Frame + if log != nil { + frames = make([]logging.Frame, 0, 4) + } + handshakeWasComplete := s.handshakeComplete + var handleErr error + for len(data) > 0 { + l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version) + if err != nil { + return false, err + } + data = data[l:] + if frame == nil { + break + } + if ackhandler.IsFrameAckEliciting(frame) { + isAckEliciting = true + } + if log != nil { + frames = append(frames, toLoggingFrame(frame)) + } + // An error occurred handling a previous frame. + // Don't handle the current frame. + if handleErr != nil { + continue + } + if err := s.handleFrame(frame, encLevel, destConnID); err != nil { + if log == nil { + return false, err + } + // If we're logging, we need to keep parsing (but not handling) all frames. + handleErr = err + } + } + + if log != nil { + log(frames) + if handleErr != nil { + return false, handleErr + } + } + + // Handle completion of the handshake after processing all the frames. + // This ensures that we correctly handle the following case on the server side: + // We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake, + // and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame. 
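// Editor's aside: a minimal sketch of the ordering this buys. A single Handshake
// packet may carry [CRYPTO (completes the handshake), ACK]; running completion
// only after the loop means the trailing ACK is still handled:
//
//	wasComplete := handshakeComplete
//	for _, f := range frames {
//		handle(f) // a CRYPTO frame may flip handshakeComplete here
//	}
//	if !wasComplete && handshakeComplete {
//		onHandshakeComplete() // runs once, after every frame was seen
//	}
//
// which is exactly the check that follows.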
+ if !handshakeWasComplete && s.handshakeComplete { + if err := s.handleHandshakeComplete(); err != nil { + return false, err + } + } + + return +} + +func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error { + var err error + wire.LogFrame(s.logger, f, false) + switch frame := f.(type) { + case *wire.CryptoFrame: + err = s.handleCryptoFrame(frame, encLevel) + case *wire.StreamFrame: + err = s.handleStreamFrame(frame) + case *wire.AckFrame: + err = s.handleAckFrame(frame, encLevel) + case *wire.ConnectionCloseFrame: + s.handleConnectionCloseFrame(frame) + case *wire.ResetStreamFrame: + err = s.handleResetStreamFrame(frame) + case *wire.MaxDataFrame: + s.handleMaxDataFrame(frame) + case *wire.MaxStreamDataFrame: + err = s.handleMaxStreamDataFrame(frame) + case *wire.MaxStreamsFrame: + s.handleMaxStreamsFrame(frame) + case *wire.DataBlockedFrame: + case *wire.StreamDataBlockedFrame: + case *wire.StreamsBlockedFrame: + case *wire.StopSendingFrame: + err = s.handleStopSendingFrame(frame) + case *wire.PingFrame: + case *wire.PathChallengeFrame: + s.handlePathChallengeFrame(frame) + case *wire.PathResponseFrame: + // since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs + err = errors.New("unexpected PATH_RESPONSE frame") + case *wire.NewTokenFrame: + err = s.handleNewTokenFrame(frame) + case *wire.NewConnectionIDFrame: + err = s.handleNewConnectionIDFrame(frame) + case *wire.RetireConnectionIDFrame: + err = s.handleRetireConnectionIDFrame(frame, destConnID) + case *wire.HandshakeDoneFrame: + err = s.handleHandshakeDoneFrame() + case *wire.DatagramFrame: + err = s.handleDatagramFrame(frame) + default: + err = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&frame).Elem().Type().Name()) + } + return err +} + +// handlePacket is called by the server with a new packet +func (s *connection) handlePacket(p receivedPacket) { + // Discard packets once the amount of queued packets is larger than + // the channel size, protocol.MaxConnUnprocessedPackets + select { + case s.receivedPackets <- p: + default: + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropDOSPrevention) + } + } +} + +func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) { + if frame.IsApplicationError { + s.closeRemote(&qerr.ApplicationError{ + Remote: true, + ErrorCode: qerr.ApplicationErrorCode(frame.ErrorCode), + ErrorMessage: frame.ReasonPhrase, + }) + return + } + s.closeRemote(&qerr.TransportError{ + Remote: true, + ErrorCode: qerr.TransportErrorCode(frame.ErrorCode), + FrameType: frame.FrameType, + ErrorMessage: frame.ReasonPhrase, + }) +} + +func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { + if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil { + return err + } + for { + data := s.cryptoStreamManager.GetCryptoData(encLevel) + if data == nil { + break + } + if err := s.cryptoStreamHandler.HandleMessage(data, encLevel); err != nil { + return err + } + } + return s.handleHandshakeEvents() +} + +func (s *connection) handleHandshakeEvents() error { + for { + ev := s.cryptoStreamHandler.NextEvent() + var err error + switch ev.Kind { + case handshake.EventNoEvent: + return nil + case handshake.EventHandshakeComplete: + // Don't call handleHandshakeComplete yet. 
+ // It's advantageous to process ACK frames that might be serialized after the CRYPTO frame first. + s.handshakeComplete = true + case handshake.EventReceivedTransportParameters: + err = s.handleTransportParameters(ev.TransportParameters) + case handshake.EventRestoredTransportParameters: + s.restoreTransportParameters(ev.TransportParameters) + close(s.earlyConnReadyChan) + case handshake.EventReceivedReadKeys: + // Queue all packets for decryption that have been undecryptable so far. + s.undecryptablePacketsToProcess = s.undecryptablePackets + s.undecryptablePackets = nil + case handshake.EventDiscard0RTTKeys: + err = s.dropEncryptionLevel(protocol.Encryption0RTT) + case handshake.EventWriteInitialData: + _, err = s.initialStream.Write(ev.Data) + case handshake.EventWriteHandshakeData: + _, err = s.handshakeStream.Write(ev.Data) + } + if err != nil { + return err + } + } +} + +func (s *connection) handleStreamFrame(frame *wire.StreamFrame) error { + str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) + if err != nil { + return err + } + if str == nil { + // Stream is closed and already garbage collected + // ignore this StreamFrame + return nil + } + return str.handleStreamFrame(frame) +} + +func (s *connection) handleMaxDataFrame(frame *wire.MaxDataFrame) { + s.connFlowController.UpdateSendWindow(frame.MaximumData) +} + +func (s *connection) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) error { + str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID) + if err != nil { + return err + } + if str == nil { + // stream is closed and already garbage collected + return nil + } + str.updateSendWindow(frame.MaximumStreamData) + return nil +} + +func (s *connection) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) { + s.streamsMap.HandleMaxStreamsFrame(frame) +} + +func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { + str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) + if err != nil { + return err + } + if str == nil { + // stream is closed and already garbage collected + return nil + } + return str.handleResetStreamFrame(frame) +} + +func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error { + str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID) + if err != nil { + return err + } + if str == nil { + // stream is closed and already garbage collected + return nil + } + str.handleStopSendingFrame(frame) + return nil +} + +func (s *connection) handlePathChallengeFrame(frame *wire.PathChallengeFrame) { + s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data}) +} + +func (s *connection) handleNewTokenFrame(frame *wire.NewTokenFrame) error { + if s.perspective == protocol.PerspectiveServer { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "received NEW_TOKEN frame from the client", + } + } + if s.config.TokenStore != nil { + s.config.TokenStore.Put(s.tokenStoreKey, &ClientToken{data: frame.Token}) + } + return nil +} + +func (s *connection) handleNewConnectionIDFrame(f *wire.NewConnectionIDFrame) error { + return s.connIDManager.Add(f) +} + +func (s *connection) handleRetireConnectionIDFrame(f *wire.RetireConnectionIDFrame, destConnID protocol.ConnectionID) error { + return s.connIDGenerator.Retire(f.SequenceNumber, destConnID) +} + +func (s *connection) handleHandshakeDoneFrame() error { + if s.perspective == protocol.PerspectiveServer { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "received a HANDSHAKE_DONE frame", 
+ } + } + if !s.handshakeConfirmed { + return s.handleHandshakeConfirmed() + } + return nil +} + +func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error { + acked1RTTPacket, err := s.sentPacketHandler.ReceivedAck(frame, encLevel, s.lastPacketReceivedTime) + if err != nil { + return err + } + if !acked1RTTPacket { + return nil + } + // On the client side: If the packet acknowledged a 1-RTT packet, this confirms the handshake. + // This is only possible if the ACK was sent in a 1-RTT packet. + // This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001. + if s.perspective == protocol.PerspectiveClient && !s.handshakeConfirmed { + if err := s.handleHandshakeConfirmed(); err != nil { + return err + } + } + return s.cryptoStreamHandler.SetLargest1RTTAcked(frame.LargestAcked()) +} + +func (s *connection) handleDatagramFrame(f *wire.DatagramFrame) error { + if f.Length(s.version) > wire.MaxDatagramSize { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "DATAGRAM frame too large", + } + } + s.datagramQueue.HandleDatagramFrame(f) + return nil +} + +// closeLocal closes the connection and sends a CONNECTION_CLOSE containing the error +func (s *connection) closeLocal(e error) { + s.closeOnce.Do(func() { + if e == nil { + s.logger.Infof("Closing connection.") + } else { + s.logger.Errorf("Closing connection with error: %s", e) + } + s.closeChan <- closeError{err: e, immediate: false, remote: false} + }) +} + +// destroy closes the connection without sending the error on the wire +func (s *connection) destroy(e error) { + s.destroyImpl(e) + <-s.ctx.Done() +} + +func (s *connection) destroyImpl(e error) { + s.closeOnce.Do(func() { + if nerr, ok := e.(net.Error); ok && nerr.Timeout() { + s.logger.Errorf("Destroying connection: %s", e) + } else { + s.logger.Errorf("Destroying connection with error: %s", e) + } + s.closeChan <- closeError{err: e, immediate: true, remote: false} + }) +} + +func (s *connection) closeRemote(e error) { + s.closeOnce.Do(func() { + s.logger.Errorf("Peer closed connection with error: %s", e) + s.closeChan <- closeError{err: e, immediate: true, remote: true} + }) +} + +func (s *connection) CloseWithError(code ApplicationErrorCode, desc string) error { + s.closeLocal(&qerr.ApplicationError{ + ErrorCode: code, + ErrorMessage: desc, + }) + <-s.ctx.Done() + return nil +} + +func (s *connection) closeWithTransportError(code TransportErrorCode) { + s.closeLocal(&qerr.TransportError{ErrorCode: code}) + <-s.ctx.Done() +} + +func (s *connection) handleCloseError(closeErr *closeError) { + e := closeErr.err + if e == nil { + e = &qerr.ApplicationError{} + } else { + defer func() { + closeErr.err = e + }() + } + + var ( + statelessResetErr *StatelessResetError + versionNegotiationErr *VersionNegotiationError + recreateErr *errCloseForRecreating + applicationErr *ApplicationError + transportErr *TransportError + ) + switch { + case errors.Is(e, qerr.ErrIdleTimeout), + errors.Is(e, qerr.ErrHandshakeTimeout), + errors.As(e, &statelessResetErr), + errors.As(e, &versionNegotiationErr), + errors.As(e, &recreateErr), + errors.As(e, &applicationErr), + errors.As(e, &transportErr): + default: + e = &qerr.TransportError{ + ErrorCode: qerr.InternalError, + ErrorMessage: e.Error(), + } + } + + s.streamsMap.CloseWithError(e) + s.connIDManager.Close() + if s.datagramQueue != nil { + s.datagramQueue.CloseWithError(e) + } + + if s.tracer != nil && s.tracer.ClosedConnection != nil &&
!errors.As(e, &recreateErr) { + s.tracer.ClosedConnection(e) + } + + // If this is a remote close we're done here + if closeErr.remote { + s.connIDGenerator.ReplaceWithClosed(nil) + return + } + if closeErr.immediate { + s.connIDGenerator.RemoveAll() + return + } + // Don't send out any CONNECTION_CLOSE if this is an error that occurred + // before we even sent out the first packet. + if s.perspective == protocol.PerspectiveClient && !s.sentFirstPacket { + s.connIDGenerator.RemoveAll() + return + } + connClosePacket, err := s.sendConnectionClose(e) + if err != nil { + s.logger.Debugf("Error sending CONNECTION_CLOSE: %s", err) + } + s.connIDGenerator.ReplaceWithClosed(connClosePacket) +} + +func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) error { + if s.tracer != nil && s.tracer.DroppedEncryptionLevel != nil { + s.tracer.DroppedEncryptionLevel(encLevel) + } + s.sentPacketHandler.DropPackets(encLevel) + s.receivedPacketHandler.DropPackets(encLevel) + //nolint:exhaustive // only Initial and 0-RTT need special treatment + switch encLevel { + case protocol.EncryptionInitial: + s.droppedInitialKeys = true + s.cryptoStreamHandler.DiscardInitialKeys() + case protocol.Encryption0RTT: + s.streamsMap.ResetFor0RTT() + s.framer.Handle0RTTRejection() + return s.connFlowController.Reset() + } + return s.cryptoStreamManager.Drop(encLevel) +} + +// is called for the client, when restoring transport parameters saved for 0-RTT +func (s *connection) restoreTransportParameters(params *wire.TransportParameters) { + if s.logger.Debug() { + s.logger.Debugf("Restoring Transport Parameters: %s", params) + } + + s.peerParams = params + s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit) + s.connFlowController.UpdateSendWindow(params.InitialMaxData) + s.streamsMap.UpdateLimits(params) + s.connStateMutex.Lock() + s.connState.SupportsDatagrams = s.supportsDatagrams() + s.connStateMutex.Unlock() +} + +func (s *connection) handleTransportParameters(params *wire.TransportParameters) error { + if s.tracer != nil && s.tracer.ReceivedTransportParameters != nil { + s.tracer.ReceivedTransportParameters(params) + } + if err := s.checkTransportParameters(params); err != nil { + return &qerr.TransportError{ + ErrorCode: qerr.TransportParameterError, + ErrorMessage: err.Error(), + } + } + + if s.perspective == protocol.PerspectiveClient && s.peerParams != nil && s.ConnectionState().Used0RTT && !params.ValidForUpdate(s.peerParams) { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "server sent reduced limits after accepting 0-RTT data", + } + } + + s.peerParams = params + // On the client side we have to wait for handshake completion. + // During a 0-RTT connection, we are only allowed to use the new transport parameters for 1-RTT packets. + if s.perspective == protocol.PerspectiveServer { + s.applyTransportParameters() + // On the server side, the early connection is ready as soon as we processed + // the client's transport parameters. 
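// Editor's aside: when the "early connection" (for 0-RTT) becomes usable differs
// by perspective, going by the flow in this file:
//
//	server: after processing the client's transport parameters
//	        (the close() on the next line unblocks earlyConnReady)
//	client: after restoring transport parameters remembered from an earlier
//	        connection (see EventRestoredTransportParameters above)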
+ close(s.earlyConnReadyChan) + } + + s.connStateMutex.Lock() + s.connState.SupportsDatagrams = s.supportsDatagrams() + s.connStateMutex.Unlock() + return nil +} + +func (s *connection) checkTransportParameters(params *wire.TransportParameters) error { + if s.logger.Debug() { + s.logger.Debugf("Processed Transport Parameters: %s", params) + } + + // check the initial_source_connection_id + if params.InitialSourceConnectionID != s.handshakeDestConnID { + return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", s.handshakeDestConnID, params.InitialSourceConnectionID) + } + + if s.perspective == protocol.PerspectiveServer { + return nil + } + // check the original_destination_connection_id + if params.OriginalDestinationConnectionID != s.origDestConnID { + return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", s.origDestConnID, params.OriginalDestinationConnectionID) + } + if s.retrySrcConnID != nil { // a Retry was performed + if params.RetrySourceConnectionID == nil { + return errors.New("missing retry_source_connection_id") + } + if *params.RetrySourceConnectionID != *s.retrySrcConnID { + return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", s.retrySrcConnID, *params.RetrySourceConnectionID) + } + } else if params.RetrySourceConnectionID != nil { + return errors.New("received retry_source_connection_id, although no Retry was performed") + } + return nil +} + +func (s *connection) applyTransportParameters() { + params := s.peerParams + // Our local idle timeout will always be > 0. + s.idleTimeout = s.config.MaxIdleTimeout + // If the peer advertised an idle timeout, take the minimum of the values. + if params.MaxIdleTimeout > 0 { + s.idleTimeout = min(s.idleTimeout, params.MaxIdleTimeout) + } + s.keepAliveInterval = min(s.config.KeepAlivePeriod, min(s.idleTimeout/2, protocol.MaxKeepAliveInterval)) + s.streamsMap.UpdateLimits(params) + s.frameParser.SetAckDelayExponent(params.AckDelayExponent) + s.connFlowController.UpdateSendWindow(params.InitialMaxData) + s.rttStats.SetMaxAckDelay(params.MaxAckDelay) + s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit) + if params.StatelessResetToken != nil { + s.connIDManager.SetStatelessResetToken(*params.StatelessResetToken) + } + // We don't support connection migration yet, so we don't have any use for the preferred_address. + if params.PreferredAddress != nil { + // Retire the connection ID. + s.connIDManager.AddFromPreferredAddress(params.PreferredAddress.ConnectionID, params.PreferredAddress.StatelessResetToken) + } + maxPacketSize := protocol.ByteCount(protocol.MaxPacketBufferSize) + if params.MaxUDPPayloadSize > 0 && params.MaxUDPPayloadSize < maxPacketSize { + maxPacketSize = params.MaxUDPPayloadSize + } + s.mtuDiscoverer = newMTUDiscoverer( + s.rttStats, + protocol.ByteCount(s.config.InitialPacketSize), + maxPacketSize, + s.onMTUIncreased, + s.tracer, + ) +} + +func (s *connection) triggerSending(now time.Time) error { + s.pacingDeadline = time.Time{} + + sendMode := s.sentPacketHandler.SendMode(now) + //nolint:exhaustive // No need to handle pacing limited here. + switch sendMode { + case ackhandler.SendAny: + return s.sendPackets(now) + case ackhandler.SendNone: + return nil + case ackhandler.SendPacingLimited: + deadline := s.sentPacketHandler.TimeUntilSend() + if deadline.IsZero() { + deadline = deadlineSendImmediately + } + s.pacingDeadline = deadline + // Allow sending of an ACK if we're pacing limited.
+ // This makes sure that a peer that is mostly receiving data (and thus has an inaccurate cwnd estimate) + // sends enough ACKs to allow its peer to utilize the bandwidth. + fallthrough + case ackhandler.SendAck: + // We can at most send a single ACK-only packet. + // There will only be a new ACK after receiving new packets. + // SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer. + return s.maybeSendAckOnlyPacket(now) + case ackhandler.SendPTOInitial: + if err := s.sendProbePacket(protocol.EncryptionInitial, now); err != nil { + return err + } + if s.sendQueue.WouldBlock() { + s.scheduleSending() + return nil + } + return s.triggerSending(now) + case ackhandler.SendPTOHandshake: + if err := s.sendProbePacket(protocol.EncryptionHandshake, now); err != nil { + return err + } + if s.sendQueue.WouldBlock() { + s.scheduleSending() + return nil + } + return s.triggerSending(now) + case ackhandler.SendPTOAppData: + if err := s.sendProbePacket(protocol.Encryption1RTT, now); err != nil { + return err + } + if s.sendQueue.WouldBlock() { + s.scheduleSending() + return nil + } + return s.triggerSending(now) + default: + return fmt.Errorf("BUG: invalid send mode %d", sendMode) + } +} + +func (s *connection) sendPackets(now time.Time) error { + // Path MTU Discovery + // Can't use GSO, since we need to send a single packet that's larger than our current maximum size. + // Performance-wise, this doesn't matter, since we only send a very small (<10) number of + // MTU probe packets per connection. + if s.handshakeConfirmed && s.mtuDiscoverer != nil && s.mtuDiscoverer.ShouldSendProbe(now) { + ping, size := s.mtuDiscoverer.GetPing() + p, buf, err := s.packer.PackMTUProbePacket(ping, size, s.version) + if err != nil { + return err + } + ecn := s.sentPacketHandler.ECNMode(true) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false) + s.registerPackedShortHeaderPacket(p, ecn, now) + s.sendQueue.Send(buf, 0, ecn) + // This is kind of a hack. We need to trigger sending again somehow.
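// Editor's aside: the "hack" in plain terms. Packing the MTU probe consumed this
// sendPackets call before any ordinary data was packed, so the code below sets
// the pacing deadline to "send immediately":
//
//	s.pacingDeadline = deadlineSendImmediately // run loop re-enters triggerSending at once
//
// and the regular packets get packed on that immediately following pass.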
+ s.pacingDeadline = deadlineSendImmediately + return nil + } + + if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked { + s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset}) + } + if offset := s.connFlowController.GetWindowUpdate(); offset > 0 { + s.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: offset}) + } + if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil { + s.queueControlFrame(cf) + } + + if !s.handshakeConfirmed { + packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), s.version) + if err != nil || packet == nil { + return err + } + s.sentFirstPacket = true + if err := s.sendPackedCoalescedPacket(packet, s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket()), now); err != nil { + return err + } + sendMode := s.sentPacketHandler.SendMode(now) + if sendMode == ackhandler.SendPacingLimited { + s.resetPacingDeadline() + } else if sendMode == ackhandler.SendAny { + s.pacingDeadline = deadlineSendImmediately + } + return nil + } + + if s.conn.capabilities().GSO { + return s.sendPacketsWithGSO(now) + } + return s.sendPacketsWithoutGSO(now) +} + +func (s *connection) sendPacketsWithoutGSO(now time.Time) error { + for { + buf := getPacketBuffer() + ecn := s.sentPacketHandler.ECNMode(true) + if _, err := s.appendOneShortHeaderPacket(buf, s.maxPacketSize(), ecn, now); err != nil { + if err == errNothingToPack { + buf.Release() + return nil + } + return err + } + + s.sendQueue.Send(buf, 0, ecn) + + if s.sendQueue.WouldBlock() { + return nil + } + sendMode := s.sentPacketHandler.SendMode(now) + if sendMode == ackhandler.SendPacingLimited { + s.resetPacingDeadline() + return nil + } + if sendMode != ackhandler.SendAny { + return nil + } + // Prioritize receiving of packets over sending out more packets. + if len(s.receivedPackets) > 0 { + s.pacingDeadline = deadlineSendImmediately + return nil + } + } +} + +func (s *connection) sendPacketsWithGSO(now time.Time) error { + buf := getLargePacketBuffer() + maxSize := s.maxPacketSize() + + ecn := s.sentPacketHandler.ECNMode(true) + for { + var dontSendMore bool + size, err := s.appendOneShortHeaderPacket(buf, maxSize, ecn, now) + if err != nil { + if err != errNothingToPack { + return err + } + if buf.Len() == 0 { + buf.Release() + return nil + } + dontSendMore = true + } + + if !dontSendMore { + sendMode := s.sentPacketHandler.SendMode(now) + if sendMode == ackhandler.SendPacingLimited { + s.resetPacingDeadline() + } + if sendMode != ackhandler.SendAny { + dontSendMore = true + } + } + + // Don't send more packets in this batch if they require a different ECN marking than the previous ones. + nextECN := s.sentPacketHandler.ECNMode(true) + + // Append another packet if + // 1. The congestion controller and pacer allow sending more + // 2. The last packet appended was a full-size packet + // 3. The next packet will have the same ECN marking + // 4. We still have enough space for another full-size packet in the buffer + if !dontSendMore && size == maxSize && nextECN == ecn && buf.Len()+maxSize <= buf.Cap() { + continue + } + + s.sendQueue.Send(buf, uint16(maxSize), ecn) + + if dontSendMore { + return nil + } + if s.sendQueue.WouldBlock() { + return nil + } + + // Prioritize receiving of packets over sending out more packets. 
+ if len(s.receivedPackets) > 0 { + s.pacingDeadline = deadlineSendImmediately + return nil + } + + buf = getLargePacketBuffer() + } +} + +func (s *connection) resetPacingDeadline() { + deadline := s.sentPacketHandler.TimeUntilSend() + if deadline.IsZero() { + deadline = deadlineSendImmediately + } + s.pacingDeadline = deadline +} + +func (s *connection) maybeSendAckOnlyPacket(now time.Time) error { + if !s.handshakeConfirmed { + ecn := s.sentPacketHandler.ECNMode(false) + packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), s.version) + if err != nil { + return err + } + if packet == nil { + return nil + } + return s.sendPackedCoalescedPacket(packet, ecn, now) + } + + ecn := s.sentPacketHandler.ECNMode(true) + p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), s.version) + if err != nil { + if err == errNothingToPack { + return nil + } + return err + } + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false) + s.registerPackedShortHeaderPacket(p, ecn, now) + s.sendQueue.Send(buf, 0, ecn) + return nil +} + +func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time.Time) error { + // Queue probe packets until we actually send out a packet, + // or until there are no more packets to queue. + var packet *coalescedPacket + for { + if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued { + break + } + var err error + packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version) + if err != nil { + return err + } + if packet != nil { + break + } + } + if packet == nil { + s.retransmissionQueue.AddPing(encLevel) + var err error + packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version) + if err != nil { + return err + } + } + if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) { + return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel) + } + return s.sendPackedCoalescedPacket(packet, s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket()), now) +} + +// appendOneShortHeaderPacket appends a new packet to the given packetBuffer. +// If there was nothing to pack, the returned size is 0. 
+func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.ByteCount, ecn protocol.ECN, now time.Time) (protocol.ByteCount, error) { + startLen := buf.Len() + p, err := s.packer.AppendPacket(buf, maxSize, s.version) + if err != nil { + return 0, err + } + size := buf.Len() - startLen + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, size, false) + s.registerPackedShortHeaderPacket(p, ecn, now) + return size, nil +} + +func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, ecn protocol.ECN, now time.Time) { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(p.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(p.Frames)) { + s.firstAckElicitingPacketAfterIdleSentTime = now + } + + largestAcked := protocol.InvalidPacketNumber + if p.Ack != nil { + largestAcked = p.Ack.LargestAcked() + } + s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket) + s.connIDManager.SentPacket() +} + +func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN, now time.Time) error { + s.logCoalescedPacket(packet, ecn) + for _, p := range packet.longHdrPackets { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { + s.firstAckElicitingPacketAfterIdleSentTime = now + } + largestAcked := protocol.InvalidPacketNumber + if p.ack != nil { + largestAcked = p.ack.LargestAcked() + } + s.sentPacketHandler.SentPacket(now, p.header.PacketNumber, largestAcked, p.streamFrames, p.frames, p.EncryptionLevel(), ecn, p.length, false) + if s.perspective == protocol.PerspectiveClient && p.EncryptionLevel() == protocol.EncryptionHandshake && + !s.droppedInitialKeys { + // On the client side, Initial keys are dropped as soon as the first Handshake packet is sent. + // See Section 4.9.1 of RFC 9001. 
+			if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
+				return err
+			}
+		}
+	}
+	if p := packet.shortHdrPacket; p != nil {
+		if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
+			s.firstAckElicitingPacketAfterIdleSentTime = now
+		}
+		largestAcked := protocol.InvalidPacketNumber
+		if p.Ack != nil {
+			largestAcked = p.Ack.LargestAcked()
+		}
+		s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket)
+	}
+	s.connIDManager.SentPacket()
+	s.sendQueue.Send(packet.buffer, 0, ecn)
+	return nil
+}
+
+func (s *connection) sendConnectionClose(e error) ([]byte, error) {
+	var packet *coalescedPacket
+	var err error
+	var transportErr *qerr.TransportError
+	var applicationErr *qerr.ApplicationError
+	if errors.As(e, &transportErr) {
+		packet, err = s.packer.PackConnectionClose(transportErr, s.maxPacketSize(), s.version)
+	} else if errors.As(e, &applicationErr) {
+		packet, err = s.packer.PackApplicationClose(applicationErr, s.maxPacketSize(), s.version)
+	} else {
+		packet, err = s.packer.PackConnectionClose(&qerr.TransportError{
+			ErrorCode:    qerr.InternalError,
+			ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()),
+		}, s.maxPacketSize(), s.version)
+	}
+	if err != nil {
+		return nil, err
+	}
+	ecn := s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket())
+	s.logCoalescedPacket(packet, ecn)
+	return packet.buffer.Data, s.conn.Write(packet.buffer.Data, 0, ecn)
+}
+
+func (s *connection) maxPacketSize() protocol.ByteCount {
+	if s.mtuDiscoverer == nil {
+		// Use the configured packet size on the client side.
+		// If the server sends a max_udp_payload_size that's smaller than this size, we can ignore this:
+		// Apparently the server still processed the (fully padded) Initial packet anyway.
+		if s.perspective == protocol.PerspectiveClient {
+			return protocol.ByteCount(s.config.InitialPacketSize)
+		}
+		// On the server side, there's no downside to using 1200 bytes until we've received the client's transport
+		// parameters:
+		// * If the first packet didn't contain the entire ClientHello, all we can do is ACK that packet. We don't
+		//   need a lot of bytes for that.
+		// * If it did, we will have processed the transport parameters and initialized the MTU discoverer.
+		return protocol.MinInitialPacketSize
+	}
+	return s.mtuDiscoverer.CurrentSize()
+}
+
+// AcceptStream returns the next stream opened by the peer
+func (s *connection) AcceptStream(ctx context.Context) (Stream, error) {
+	return s.streamsMap.AcceptStream(ctx)
+}
+
+func (s *connection) AcceptUniStream(ctx context.Context) (ReceiveStream, error) {
+	return s.streamsMap.AcceptUniStream(ctx)
+}
+
+// OpenStream opens a stream
+func (s *connection) OpenStream() (Stream, error) {
+	return s.streamsMap.OpenStream()
+}
+
+func (s *connection) OpenStreamSync(ctx context.Context) (Stream, error) {
+	return s.streamsMap.OpenStreamSync(ctx)
+}
+
+func (s *connection) OpenUniStream() (SendStream, error) {
+	return s.streamsMap.OpenUniStream()
+}
+
+func (s *connection) OpenUniStreamSync(ctx context.Context) (SendStream, error) {
+	return s.streamsMap.OpenUniStreamSync(ctx)
+}
+
+func (s *connection) newFlowController(id protocol.StreamID) flowcontrol.StreamFlowController {
+	initialSendWindow := s.peerParams.InitialMaxStreamDataUni
+	if id.Type() == protocol.StreamTypeBidi {
+		if id.InitiatedBy() == s.perspective {
+			initialSendWindow = s.peerParams.InitialMaxStreamDataBidiRemote
+		} else {
+			initialSendWindow = s.peerParams.InitialMaxStreamDataBidiLocal
+		}
+	}
+	return flowcontrol.NewStreamFlowController(
+		id,
+		s.connFlowController,
+		protocol.ByteCount(s.config.InitialStreamReceiveWindow),
+		protocol.ByteCount(s.config.MaxStreamReceiveWindow),
+		initialSendWindow,
+		s.rttStats,
+		s.logger,
+	)
+}
+
+// scheduleSending signals that we have data for sending
+func (s *connection) scheduleSending() {
+	select {
+	case s.sendingScheduled <- struct{}{}:
+	default:
+	}
+}
+
+// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
+// The logging.PacketType is only used for logging purposes.
+func (s *connection) tryQueueingUndecryptablePacket(p receivedPacket, pt logging.PacketType) {
+	if s.handshakeComplete {
+		panic("shouldn't queue undecryptable packets after handshake completion")
+	}
+	if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
+		if s.tracer != nil && s.tracer.DroppedPacket != nil {
+			s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropDOSPrevention)
+		}
+		s.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", p.Size())
+		return
+	}
+	s.logger.Infof("Queueing packet (%d bytes) for later decryption", p.Size())
+	if s.tracer != nil && s.tracer.BufferedPacket != nil {
+		s.tracer.BufferedPacket(pt, p.Size())
+	}
+	s.undecryptablePackets = append(s.undecryptablePackets, p)
+}
+
+func (s *connection) queueControlFrame(f wire.Frame) {
+	s.framer.QueueControlFrame(f)
+	s.scheduleSending()
+}
+
+func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) {
+	s.framer.AddActiveStream(id, str)
+	s.scheduleSending()
+}
+
+func (s *connection) onHasStreamControlFrame(id protocol.StreamID, str streamControlFrameGetter) {
+	s.framer.AddStreamWithControlFrames(id, str)
+	s.scheduleSending()
+}
+
+func (s *connection) onStreamCompleted(id protocol.StreamID) {
+	if err := s.streamsMap.DeleteStream(id); err != nil {
+		s.closeLocal(err)
+	}
+	s.framer.RemoveActiveStream(id)
+}
+
+func (s *connection) onMTUIncreased(mtu protocol.ByteCount) {
+	s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(mtu)))
+	s.sentPacketHandler.SetMaxDatagramSize(mtu)
+}
+
+func (s *connection) SendDatagram(p []byte) error {
+	if !s.supportsDatagrams() {
+		return errors.New("datagram support disabled")
+	}
+
+	f := &wire.DatagramFrame{DataLenPresent: true}
+	// The payload size estimate is conservative.
+	// Under many circumstances we could send a few more bytes.
+	maxDataLen := min(
+		f.MaxDataLen(s.peerParams.MaxDatagramFrameSize, s.version),
+		protocol.ByteCount(s.maxPayloadSizeEstimate.Load()),
+	)
+	if protocol.ByteCount(len(p)) > maxDataLen {
+		return &DatagramTooLargeError{MaxDatagramPayloadSize: int64(maxDataLen)}
+	}
+	f.Data = make([]byte, len(p))
+	copy(f.Data, p)
+	return s.datagramQueue.Add(f)
+}
+
+func (s *connection) ReceiveDatagram(ctx context.Context) ([]byte, error) {
+	if !s.config.EnableDatagrams {
+		return nil, errors.New("datagram support disabled")
+	}
+	return s.datagramQueue.Receive(ctx)
+}
+
+func (s *connection) LocalAddr() net.Addr {
+	return s.conn.LocalAddr()
+}
+
+func (s *connection) RemoteAddr() net.Addr {
+	return s.conn.RemoteAddr()
+}
+
+func (s *connection) GetVersion() protocol.Version {
+	return s.version
+}
+
+func (s *connection) NextConnection(ctx context.Context) (Connection, error) {
+	// The handshake might fail after the server rejected 0-RTT.
+	// This could happen if the Finished message is malformed or never received.
+	select {
+	case <-ctx.Done():
+		return nil, context.Cause(ctx)
+	case <-s.Context().Done():
+	case <-s.HandshakeComplete():
+		s.streamsMap.UseResetMaps()
+	}
+	return s, nil
+}
+
+// estimateMaxPayloadSize estimates the maximum payload size for short header packets.
+// It is not very sophisticated: it just subtracts the size of the header (assuming the maximum
+// connection ID length), and the size of the encryption tag.
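+// For example, an MTU of 1252 bytes yields an estimate of 1252 - 1 - 20 - 16 = 1215 bytes of payload.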
+func estimateMaxPayloadSize(mtu protocol.ByteCount) protocol.ByteCount {
+	return mtu - 1 /* type byte */ - 20 /* maximum connection ID length */ - 16 /* tag size */
+}
diff --git a/vendor/github.com/quic-go/quic-go/connection_logging.go b/vendor/github.com/quic-go/quic-go/connection_logging.go
new file mode 100644
index 00000000..f75b39f6
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/connection_logging.go
@@ -0,0 +1,173 @@
+package quic

+import (
+	"slices"
+
+	"github.com/quic-go/quic-go/internal/ackhandler"
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/wire"
+	"github.com/quic-go/quic-go/logging"
+)
+
+// toLoggingFrame converts a wire.Frame into a logging.Frame.
+// This makes it possible for external packages to access the frames.
+// Furthermore, it removes the data slices from CRYPTO and STREAM frames.
+func toLoggingFrame(frame wire.Frame) logging.Frame {
+	switch f := frame.(type) {
+	case *wire.AckFrame:
+		// We use a pool for ACK frames.
+		// Implementations of the tracer interface may hold on to frames, so we need to make a copy here.
+		return toLoggingAckFrame(f)
+	case *wire.CryptoFrame:
+		return &logging.CryptoFrame{
+			Offset: f.Offset,
+			Length: protocol.ByteCount(len(f.Data)),
+		}
+	case *wire.StreamFrame:
+		return &logging.StreamFrame{
+			StreamID: f.StreamID,
+			Offset:   f.Offset,
+			Length:   f.DataLen(),
+			Fin:      f.Fin,
+		}
+	case *wire.DatagramFrame:
+		return &logging.DatagramFrame{
+			Length: logging.ByteCount(len(f.Data)),
+		}
+	default:
+		return logging.Frame(frame)
+	}
+}
+
+func toLoggingAckFrame(f *wire.AckFrame) *logging.AckFrame {
+	ack := &logging.AckFrame{
+		AckRanges: slices.Clone(f.AckRanges),
+		DelayTime: f.DelayTime,
+		ECNCE:     f.ECNCE,
+		ECT0:      f.ECT0,
+		ECT1:      f.ECT1,
+	}
+	return ack
+}
+
+func (s *connection) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) {
+	// quic-go logging
+	if s.logger.Debug() {
+		p.header.Log(s.logger)
+		if p.ack != nil {
+			wire.LogFrame(s.logger, p.ack, true)
+		}
+		for _, frame := range p.frames {
+			wire.LogFrame(s.logger, frame.Frame, true)
+		}
+		for _, frame := range p.streamFrames {
+			wire.LogFrame(s.logger, frame.Frame, true)
+		}
+	}
+
+	// tracing
+	if s.tracer != nil && s.tracer.SentLongHeaderPacket != nil {
+		frames := make([]logging.Frame, 0, len(p.frames))
+		for _, f := range p.frames {
+			frames = append(frames, toLoggingFrame(f.Frame))
+		}
+		for _, f := range p.streamFrames {
+			frames = append(frames, toLoggingFrame(f.Frame))
+		}
+		var ack *logging.AckFrame
+		if p.ack != nil {
+			ack = toLoggingAckFrame(p.ack)
+		}
+		s.tracer.SentLongHeaderPacket(p.header, p.length, ecn, ack, frames)
+	}
+}
+
+func (s *connection) logShortHeaderPacket(
+	destConnID protocol.ConnectionID,
+	ackFrame *wire.AckFrame,
+	frames []ackhandler.Frame,
+	streamFrames []ackhandler.StreamFrame,
+	pn protocol.PacketNumber,
+	pnLen protocol.PacketNumberLen,
+	kp protocol.KeyPhaseBit,
+	ecn protocol.ECN,
+	size protocol.ByteCount,
+	isCoalesced bool,
+) {
+	if s.logger.Debug() && !isCoalesced {
+		s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, s.logID, ecn)
+	}
+	// quic-go logging
+	if s.logger.Debug() {
+		wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp)
+		if ackFrame != nil {
+			wire.LogFrame(s.logger, ackFrame, true)
+		}
+		for _, f := range frames {
+			wire.LogFrame(s.logger, f.Frame, true)
+		}
+		for _, f := range streamFrames {
+			wire.LogFrame(s.logger, f.Frame, true)
+		}
+	}
+
+	// tracing
+	if s.tracer != nil &&
s.tracer.SentShortHeaderPacket != nil { + fs := make([]logging.Frame, 0, len(frames)+len(streamFrames)) + for _, f := range frames { + fs = append(fs, toLoggingFrame(f.Frame)) + } + for _, f := range streamFrames { + fs = append(fs, toLoggingFrame(f.Frame)) + } + var ack *logging.AckFrame + if ackFrame != nil { + ack = toLoggingAckFrame(ackFrame) + } + s.tracer.SentShortHeaderPacket( + &logging.ShortHeader{ + DestConnectionID: destConnID, + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, + size, + ecn, + ack, + fs, + ) + } +} + +func (s *connection) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) { + if s.logger.Debug() { + // There's a short period between dropping both Initial and Handshake keys and completion of the handshake, + // during which we might call PackCoalescedPacket but just pack a short header packet. + if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil { + s.logShortHeaderPacket( + packet.shortHdrPacket.DestConnID, + packet.shortHdrPacket.Ack, + packet.shortHdrPacket.Frames, + packet.shortHdrPacket.StreamFrames, + packet.shortHdrPacket.PacketNumber, + packet.shortHdrPacket.PacketNumberLen, + packet.shortHdrPacket.KeyPhase, + ecn, + packet.shortHdrPacket.Length, + false, + ) + return + } + if len(packet.longHdrPackets) > 1 { + s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) + } else { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) + } + } + for _, p := range packet.longHdrPackets { + s.logLongHeaderPacket(p, ecn) + } + if p := packet.shortHdrPacket; p != nil { + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true) + } +} diff --git a/vendor/github.com/quic-go/quic-go/connection_timer.go b/vendor/github.com/quic-go/quic-go/connection_timer.go new file mode 100644 index 00000000..171fdd01 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/connection_timer.go @@ -0,0 +1,51 @@ +package quic + +import ( + "time" + + "github.com/quic-go/quic-go/internal/utils" +) + +var deadlineSendImmediately = time.Time{}.Add(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine + +type connectionTimer struct { + timer *utils.Timer + last time.Time +} + +func newTimer() *connectionTimer { + return &connectionTimer{timer: utils.NewTimer()} +} + +func (t *connectionTimer) SetRead() { + if deadline := t.timer.Deadline(); deadline != deadlineSendImmediately { + t.last = deadline + } + t.timer.SetRead() +} + +func (t *connectionTimer) Chan() <-chan time.Time { + return t.timer.Chan() +} + +// SetTimer resets the timer. +// It makes sure that the deadline is strictly increasing. +// This prevents busy-looping in cases where the timer fires, but we can't actually send out a packet. +// This doesn't apply to the pacing deadline, which can be set multiple times to deadlineSendImmediately. 
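+// For example, if the timer last fired at the ack alarm's deadline and the ack alarm is still
+// set to that same time, the ack alarm is ignored, while a pacing deadline at or before that
+// time is still honored.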
+func (t *connectionTimer) SetTimer(idleTimeoutOrKeepAlive, ackAlarm, lossTime, pacing time.Time) {
+	deadline := idleTimeoutOrKeepAlive
+	if !ackAlarm.IsZero() && ackAlarm.Before(deadline) && ackAlarm.After(t.last) {
+		deadline = ackAlarm
+	}
+	if !lossTime.IsZero() && lossTime.Before(deadline) && lossTime.After(t.last) {
+		deadline = lossTime
+	}
+	if !pacing.IsZero() && pacing.Before(deadline) {
+		deadline = pacing
+	}
+	t.timer.Reset(deadline)
+}
+
+func (t *connectionTimer) Stop() {
+	t.timer.Stop()
+}
diff --git a/vendor/github.com/quic-go/quic-go/crypto_stream.go b/vendor/github.com/quic-go/quic-go/crypto_stream.go
new file mode 100644
index 00000000..9a387baa
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/crypto_stream.go
@@ -0,0 +1,83 @@
+package quic
+
+import (
+	"fmt"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/qerr"
+	"github.com/quic-go/quic-go/internal/wire"
+)
+
+type cryptoStream struct {
+	queue frameSorter
+
+	highestOffset protocol.ByteCount
+	finished      bool
+
+	writeOffset protocol.ByteCount
+	writeBuf    []byte
+}
+
+func newCryptoStream() *cryptoStream {
+	return &cryptoStream{queue: *newFrameSorter()}
+}
+
+func (s *cryptoStream) HandleCryptoFrame(f *wire.CryptoFrame) error {
+	highestOffset := f.Offset + protocol.ByteCount(len(f.Data))
+	if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset {
+		return &qerr.TransportError{
+			ErrorCode:    qerr.CryptoBufferExceeded,
+			ErrorMessage: fmt.Sprintf("received invalid offset %d on crypto stream, maximum allowed %d", maxOffset, protocol.MaxCryptoStreamOffset),
+		}
+	}
+	if s.finished {
+		if highestOffset > s.highestOffset {
+			// reject crypto data received after this stream was already finished
+			return &qerr.TransportError{
+				ErrorCode:    qerr.ProtocolViolation,
+				ErrorMessage: "received crypto data after change of encryption level",
+			}
+		}
+		// ignore data with a smaller offset than the highest received
+		// could e.g. be a retransmission
+		return nil
+	}
+	s.highestOffset = max(s.highestOffset, highestOffset)
+	return s.queue.Push(f.Data, f.Offset, nil)
+}
+
+// GetCryptoData retrieves data that was received in CRYPTO frames
+func (s *cryptoStream) GetCryptoData() []byte {
+	_, data, _ := s.queue.Pop()
+	return data
+}
+
+func (s *cryptoStream) Finish() error {
+	if s.queue.HasMoreData() {
+		return &qerr.TransportError{
+			ErrorCode:    qerr.ProtocolViolation,
+			ErrorMessage: "encryption level changed, but crypto stream has more data to read",
+		}
+	}
+	s.finished = true
+	return nil
+}
+
+// Write writes data that should be sent out in CRYPTO frames
+func (s *cryptoStream) Write(p []byte) (int, error) {
+	s.writeBuf = append(s.writeBuf, p...)
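+	// The data is buffered here and later packed into CRYPTO frames by PopCryptoFrame.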
+ return len(p), nil +} + +func (s *cryptoStream) HasData() bool { + return len(s.writeBuf) > 0 +} + +func (s *cryptoStream) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame { + f := &wire.CryptoFrame{Offset: s.writeOffset} + n := min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf))) + f.Data = s.writeBuf[:n] + s.writeBuf = s.writeBuf[n:] + s.writeOffset += n + return f +} diff --git a/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go new file mode 100644 index 00000000..d70b9b00 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go @@ -0,0 +1,77 @@ +package quic + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +type cryptoStreamManager struct { + initialStream *cryptoStream + handshakeStream *cryptoStream + oneRTTStream *cryptoStream +} + +func newCryptoStreamManager( + initialStream *cryptoStream, + handshakeStream *cryptoStream, + oneRTTStream *cryptoStream, +) *cryptoStreamManager { + return &cryptoStreamManager{ + initialStream: initialStream, + handshakeStream: handshakeStream, + oneRTTStream: oneRTTStream, + } +} + +func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { + var str *cryptoStream + //nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets. + switch encLevel { + case protocol.EncryptionInitial: + str = m.initialStream + case protocol.EncryptionHandshake: + str = m.handshakeStream + case protocol.Encryption1RTT: + str = m.oneRTTStream + default: + return fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel) + } + return str.HandleCryptoFrame(frame) +} + +func (m *cryptoStreamManager) GetCryptoData(encLevel protocol.EncryptionLevel) []byte { + var str *cryptoStream + //nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets. + switch encLevel { + case protocol.EncryptionInitial: + str = m.initialStream + case protocol.EncryptionHandshake: + str = m.handshakeStream + case protocol.Encryption1RTT: + str = m.oneRTTStream + default: + panic(fmt.Sprintf("received CRYPTO frame with unexpected encryption level: %s", encLevel)) + } + return str.GetCryptoData() +} + +func (m *cryptoStreamManager) GetPostHandshakeData(maxSize protocol.ByteCount) *wire.CryptoFrame { + if !m.oneRTTStream.HasData() { + return nil + } + return m.oneRTTStream.PopCryptoFrame(maxSize) +} + +func (m *cryptoStreamManager) Drop(encLevel protocol.EncryptionLevel) error { + //nolint:exhaustive // 1-RTT keys should never get dropped. 
+ switch encLevel { + case protocol.EncryptionInitial: + return m.initialStream.Finish() + case protocol.EncryptionHandshake: + return m.handshakeStream.Finish() + default: + panic(fmt.Sprintf("dropped unexpected encryption level: %s", encLevel)) + } +} diff --git a/vendor/github.com/quic-go/quic-go/datagram_queue.go b/vendor/github.com/quic-go/quic-go/datagram_queue.go new file mode 100644 index 00000000..e26285b2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/datagram_queue.go @@ -0,0 +1,137 @@ +package quic + +import ( + "context" + "sync" + + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/utils/ringbuffer" + "github.com/quic-go/quic-go/internal/wire" +) + +const ( + maxDatagramSendQueueLen = 32 + maxDatagramRcvQueueLen = 128 +) + +type datagramQueue struct { + sendMx sync.Mutex + sendQueue ringbuffer.RingBuffer[*wire.DatagramFrame] + sent chan struct{} // used to notify Add that a datagram was dequeued + + rcvMx sync.Mutex + rcvQueue [][]byte + rcvd chan struct{} // used to notify Receive that a new datagram was received + + closeErr error + closed chan struct{} + + hasData func() + + logger utils.Logger +} + +func newDatagramQueue(hasData func(), logger utils.Logger) *datagramQueue { + return &datagramQueue{ + hasData: hasData, + rcvd: make(chan struct{}, 1), + sent: make(chan struct{}, 1), + closed: make(chan struct{}), + logger: logger, + } +} + +// Add queues a new DATAGRAM frame for sending. +// Up to 32 DATAGRAM frames will be queued. +// Once that limit is reached, Add blocks until the queue size has reduced. +func (h *datagramQueue) Add(f *wire.DatagramFrame) error { + h.sendMx.Lock() + + for { + if h.sendQueue.Len() < maxDatagramSendQueueLen { + h.sendQueue.PushBack(f) + h.sendMx.Unlock() + h.hasData() + return nil + } + select { + case <-h.sent: // drain the queue so we don't loop immediately + default: + } + h.sendMx.Unlock() + select { + case <-h.closed: + return h.closeErr + case <-h.sent: + } + h.sendMx.Lock() + } +} + +// Peek gets the next DATAGRAM frame for sending. +// If actually sent out, Pop needs to be called before the next call to Peek. +func (h *datagramQueue) Peek() *wire.DatagramFrame { + h.sendMx.Lock() + defer h.sendMx.Unlock() + if h.sendQueue.Empty() { + return nil + } + return h.sendQueue.PeekFront() +} + +func (h *datagramQueue) Pop() { + h.sendMx.Lock() + defer h.sendMx.Unlock() + _ = h.sendQueue.PopFront() + select { + case h.sent <- struct{}{}: + default: + } +} + +// HandleDatagramFrame handles a received DATAGRAM frame. +func (h *datagramQueue) HandleDatagramFrame(f *wire.DatagramFrame) { + data := make([]byte, len(f.Data)) + copy(data, f.Data) + var queued bool + h.rcvMx.Lock() + if len(h.rcvQueue) < maxDatagramRcvQueueLen { + h.rcvQueue = append(h.rcvQueue, data) + queued = true + select { + case h.rcvd <- struct{}{}: + default: + } + } + h.rcvMx.Unlock() + if !queued && h.logger.Debug() { + h.logger.Debugf("Discarding received DATAGRAM frame (%d bytes payload)", len(f.Data)) + } +} + +// Receive gets a received DATAGRAM frame. 
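+// It blocks until a datagram is available, the queue is closed, or the context is cancelled.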
+func (h *datagramQueue) Receive(ctx context.Context) ([]byte, error) { + for { + h.rcvMx.Lock() + if len(h.rcvQueue) > 0 { + data := h.rcvQueue[0] + h.rcvQueue = h.rcvQueue[1:] + h.rcvMx.Unlock() + return data, nil + } + h.rcvMx.Unlock() + select { + case <-h.rcvd: + continue + case <-h.closed: + return nil, h.closeErr + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (h *datagramQueue) CloseWithError(e error) { + h.closeErr = e + close(h.closed) +} diff --git a/vendor/github.com/quic-go/quic-go/errors.go b/vendor/github.com/quic-go/quic-go/errors.go new file mode 100644 index 00000000..3fe1e0a9 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/errors.go @@ -0,0 +1,75 @@ +package quic + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/qerr" +) + +type ( + TransportError = qerr.TransportError + ApplicationError = qerr.ApplicationError + VersionNegotiationError = qerr.VersionNegotiationError + StatelessResetError = qerr.StatelessResetError + IdleTimeoutError = qerr.IdleTimeoutError + HandshakeTimeoutError = qerr.HandshakeTimeoutError +) + +type ( + TransportErrorCode = qerr.TransportErrorCode + ApplicationErrorCode = qerr.ApplicationErrorCode + StreamErrorCode = qerr.StreamErrorCode +) + +const ( + NoError = qerr.NoError + InternalError = qerr.InternalError + ConnectionRefused = qerr.ConnectionRefused + FlowControlError = qerr.FlowControlError + StreamLimitError = qerr.StreamLimitError + StreamStateError = qerr.StreamStateError + FinalSizeError = qerr.FinalSizeError + FrameEncodingError = qerr.FrameEncodingError + TransportParameterError = qerr.TransportParameterError + ConnectionIDLimitError = qerr.ConnectionIDLimitError + ProtocolViolation = qerr.ProtocolViolation + InvalidToken = qerr.InvalidToken + ApplicationErrorErrorCode = qerr.ApplicationErrorErrorCode + CryptoBufferExceeded = qerr.CryptoBufferExceeded + KeyUpdateError = qerr.KeyUpdateError + AEADLimitReached = qerr.AEADLimitReached + NoViablePathError = qerr.NoViablePathError +) + +// A StreamError is used for Stream.CancelRead and Stream.CancelWrite. +// It is also returned from Stream.Read and Stream.Write if the peer canceled reading or writing. +type StreamError struct { + StreamID StreamID + ErrorCode StreamErrorCode + Remote bool +} + +func (e *StreamError) Is(target error) bool { + _, ok := target.(*StreamError) + return ok +} + +func (e *StreamError) Error() string { + pers := "local" + if e.Remote { + pers = "remote" + } + return fmt.Sprintf("stream %d canceled by %s with error code %d", e.StreamID, pers, e.ErrorCode) +} + +// DatagramTooLargeError is returned from Connection.SendDatagram if the payload is too large to be sent. 
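+// A caller might detect it with errors.As and retry with a smaller payload, e.g.:
+//
+//	var tooLarge *quic.DatagramTooLargeError
+//	if errors.As(err, &tooLarge) {
+//		// split the payload into chunks of at most tooLarge.MaxDatagramPayloadSize bytes
+//	}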
+type DatagramTooLargeError struct { + MaxDatagramPayloadSize int64 +} + +func (e *DatagramTooLargeError) Is(target error) bool { + _, ok := target.(*DatagramTooLargeError) + return ok +} + +func (e *DatagramTooLargeError) Error() string { return "DATAGRAM frame too large" } diff --git a/vendor/github.com/quic-go/quic-go/frame_sorter.go b/vendor/github.com/quic-go/quic-go/frame_sorter.go new file mode 100644 index 00000000..bee0abad --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/frame_sorter.go @@ -0,0 +1,237 @@ +package quic + +import ( + "errors" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" +) + +// byteInterval is an interval from one ByteCount to the other +type byteInterval struct { + Start protocol.ByteCount + End protocol.ByteCount +} + +var byteIntervalElementPool sync.Pool + +func init() { + byteIntervalElementPool = *list.NewPool[byteInterval]() +} + +type frameSorterEntry struct { + Data []byte + DoneCb func() +} + +type frameSorter struct { + queue map[protocol.ByteCount]frameSorterEntry + readPos protocol.ByteCount + gaps *list.List[byteInterval] +} + +var errDuplicateStreamData = errors.New("duplicate stream data") + +func newFrameSorter() *frameSorter { + s := frameSorter{ + gaps: list.NewWithPool[byteInterval](&byteIntervalElementPool), + queue: make(map[protocol.ByteCount]frameSorterEntry), + } + s.gaps.PushFront(byteInterval{Start: 0, End: protocol.MaxByteCount}) + return &s +} + +func (s *frameSorter) Push(data []byte, offset protocol.ByteCount, doneCb func()) error { + err := s.push(data, offset, doneCb) + if err == errDuplicateStreamData { + if doneCb != nil { + doneCb() + } + return nil + } + return err +} + +func (s *frameSorter) push(data []byte, offset protocol.ByteCount, doneCb func()) error { + if len(data) == 0 { + return errDuplicateStreamData + } + + start := offset + end := offset + protocol.ByteCount(len(data)) + + if end <= s.gaps.Front().Value.Start { + return errDuplicateStreamData + } + + startGap, startsInGap := s.findStartGap(start) + endGap, endsInGap := s.findEndGap(startGap, end) + + startGapEqualsEndGap := startGap == endGap + + if (startGapEqualsEndGap && end <= startGap.Value.Start) || + (!startGapEqualsEndGap && startGap.Value.End >= endGap.Value.Start && end <= startGap.Value.Start) { + return errDuplicateStreamData + } + + startGapNext := startGap.Next() + startGapEnd := startGap.Value.End // save it, in case startGap is modified + endGapStart := endGap.Value.Start // save it, in case endGap is modified + endGapEnd := endGap.Value.End // save it, in case endGap is modified + var adjustedStartGapEnd bool + var wasCut bool + + pos := start + var hasReplacedAtLeastOne bool + for { + oldEntry, ok := s.queue[pos] + if !ok { + break + } + oldEntryLen := protocol.ByteCount(len(oldEntry.Data)) + if end-pos > oldEntryLen || (hasReplacedAtLeastOne && end-pos == oldEntryLen) { + // The existing frame is shorter than the new frame. Replace it. + delete(s.queue, pos) + pos += oldEntryLen + hasReplacedAtLeastOne = true + if oldEntry.DoneCb != nil { + oldEntry.DoneCb() + } + } else { + if !hasReplacedAtLeastOne { + return errDuplicateStreamData + } + // The existing frame is longer than the new frame. + // Cut the new frame such that the end aligns with the start of the existing frame. 
+ data = data[:pos-start] + end = pos + wasCut = true + break + } + } + + if !startsInGap && !hasReplacedAtLeastOne { + // cut the frame, such that it starts at the start of the gap + data = data[startGap.Value.Start-start:] + start = startGap.Value.Start + wasCut = true + } + if start <= startGap.Value.Start { + if end >= startGap.Value.End { + // The frame covers the whole startGap. Delete the gap. + s.gaps.Remove(startGap) + } else { + startGap.Value.Start = end + } + } else if !hasReplacedAtLeastOne { + startGap.Value.End = start + adjustedStartGapEnd = true + } + + if !startGapEqualsEndGap { + s.deleteConsecutive(startGapEnd) + var nextGap *list.Element[byteInterval] + for gap := startGapNext; gap.Value.End < endGapStart; gap = nextGap { + nextGap = gap.Next() + s.deleteConsecutive(gap.Value.End) + s.gaps.Remove(gap) + } + } + + if !endsInGap && start != endGapEnd && end > endGapEnd { + // cut the frame, such that it ends at the end of the gap + data = data[:endGapEnd-start] + end = endGapEnd + wasCut = true + } + if end == endGapEnd { + if !startGapEqualsEndGap { + // The frame covers the whole endGap. Delete the gap. + s.gaps.Remove(endGap) + } + } else { + if startGapEqualsEndGap && adjustedStartGapEnd { + // The frame split the existing gap into two. + s.gaps.InsertAfter(byteInterval{Start: end, End: startGapEnd}, startGap) + } else if !startGapEqualsEndGap { + endGap.Value.Start = end + } + } + + if wasCut && len(data) < protocol.MinStreamFrameBufferSize { + newData := make([]byte, len(data)) + copy(newData, data) + data = newData + if doneCb != nil { + doneCb() + doneCb = nil + } + } + + if s.gaps.Len() > protocol.MaxStreamFrameSorterGaps { + return errors.New("too many gaps in received data") + } + + s.queue[start] = frameSorterEntry{Data: data, DoneCb: doneCb} + return nil +} + +func (s *frameSorter) findStartGap(offset protocol.ByteCount) (*list.Element[byteInterval], bool) { + for gap := s.gaps.Front(); gap != nil; gap = gap.Next() { + if offset >= gap.Value.Start && offset <= gap.Value.End { + return gap, true + } + if offset < gap.Value.Start { + return gap, false + } + } + panic("no gap found") +} + +func (s *frameSorter) findEndGap(startGap *list.Element[byteInterval], offset protocol.ByteCount) (*list.Element[byteInterval], bool) { + for gap := startGap; gap != nil; gap = gap.Next() { + if offset >= gap.Value.Start && offset < gap.Value.End { + return gap, true + } + if offset < gap.Value.Start { + return gap.Prev(), false + } + } + panic("no gap found") +} + +// deleteConsecutive deletes consecutive frames from the queue, starting at pos +func (s *frameSorter) deleteConsecutive(pos protocol.ByteCount) { + for { + oldEntry, ok := s.queue[pos] + if !ok { + break + } + oldEntryLen := protocol.ByteCount(len(oldEntry.Data)) + delete(s.queue, pos) + if oldEntry.DoneCb != nil { + oldEntry.DoneCb() + } + pos += oldEntryLen + } +} + +func (s *frameSorter) Pop() (protocol.ByteCount, []byte, func()) { + entry, ok := s.queue[s.readPos] + if !ok { + return s.readPos, nil, nil + } + delete(s.queue, s.readPos) + offset := s.readPos + s.readPos += protocol.ByteCount(len(entry.Data)) + if s.gaps.Front().Value.End <= s.readPos { + panic("frame sorter BUG: read position higher than a gap") + } + return offset, entry.Data, entry.DoneCb +} + +// HasMoreData says if there is any more data queued at *any* offset. 
+func (s *frameSorter) HasMoreData() bool { + return len(s.queue) > 0 +} diff --git a/vendor/github.com/quic-go/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go new file mode 100644 index 00000000..e162f6b8 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/framer.go @@ -0,0 +1,240 @@ +package quic + +import ( + "slices" + "sync" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils/ringbuffer" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/quicvarint" +) + +const ( + maxPathResponses = 256 + maxControlFrames = 16 << 10 +) + +// This is the largest possible size of a stream-related control frame +// (which is the RESET_STREAM frame). +const maxStreamControlFrameSize = 25 + +type streamControlFrameGetter interface { + getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) +} + +type framer struct { + mutex sync.Mutex + + activeStreams map[protocol.StreamID]sendStreamI + streamQueue ringbuffer.RingBuffer[protocol.StreamID] + streamsWithControlFrames map[protocol.StreamID]streamControlFrameGetter + + controlFrameMutex sync.Mutex + controlFrames []wire.Frame + pathResponses []*wire.PathResponseFrame + queuedTooManyControlFrames bool +} + +func newFramer() *framer { + return &framer{ + activeStreams: make(map[protocol.StreamID]sendStreamI), + streamsWithControlFrames: make(map[protocol.StreamID]streamControlFrameGetter), + } +} + +func (f *framer) HasData() bool { + f.mutex.Lock() + hasData := !f.streamQueue.Empty() + f.mutex.Unlock() + if hasData { + return true + } + f.controlFrameMutex.Lock() + defer f.controlFrameMutex.Unlock() + return len(f.streamsWithControlFrames) > 0 || len(f.controlFrames) > 0 || len(f.pathResponses) > 0 +} + +func (f *framer) QueueControlFrame(frame wire.Frame) { + f.controlFrameMutex.Lock() + defer f.controlFrameMutex.Unlock() + + if pr, ok := frame.(*wire.PathResponseFrame); ok { + // Only queue up to maxPathResponses PATH_RESPONSE frames. + // This limit should be high enough to never be hit in practice, + // unless the peer is doing something malicious. + if len(f.pathResponses) >= maxPathResponses { + return + } + f.pathResponses = append(f.pathResponses, pr) + return + } + // This is a hack. + if len(f.controlFrames) >= maxControlFrames { + f.queuedTooManyControlFrames = true + return + } + f.controlFrames = append(f.controlFrames, frame) +} + +func (f *framer) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) { + f.controlFrameMutex.Lock() + defer f.controlFrameMutex.Unlock() + + var length protocol.ByteCount + // add a PATH_RESPONSE first, but only pack a single PATH_RESPONSE per packet + if len(f.pathResponses) > 0 { + frame := f.pathResponses[0] + frameLen := frame.Length(v) + if frameLen <= maxLen { + frames = append(frames, ackhandler.Frame{Frame: frame}) + length += frameLen + f.pathResponses = f.pathResponses[1:] + } + } + + // add stream-related control frames + for id, str := range f.streamsWithControlFrames { + start: + remainingLen := maxLen - length + if remainingLen <= maxStreamControlFrameSize { + break + } + fr, ok, hasMore := str.getControlFrame() + if !hasMore { + delete(f.streamsWithControlFrames, id) + } + if !ok { + continue + } + frames = append(frames, fr) + length += fr.Frame.Length(v) + if hasMore { + // It is rare that a stream has more than one control frame to queue. 
+			// We don't want to spawn another loop just to cover that case.
+			goto start
+		}
+	}
+
+	for len(f.controlFrames) > 0 {
+		frame := f.controlFrames[len(f.controlFrames)-1]
+		frameLen := frame.Length(v)
+		if length+frameLen > maxLen {
+			break
+		}
+		frames = append(frames, ackhandler.Frame{Frame: frame})
+		length += frameLen
+		f.controlFrames = f.controlFrames[:len(f.controlFrames)-1]
+	}
+
+	return frames, length
+}
+
+// QueuedTooManyControlFrames says if the control frame queue exceeded its maximum queue length.
+// This is a hack.
+// It is easier to implement than propagating an error return value in QueueControlFrame.
+// The correct solution would be to queue frames with their respective structs.
+// See https://github.com/quic-go/quic-go/issues/4271 for the queueing of stream-related control frames.
+func (f *framer) QueuedTooManyControlFrames() bool {
+	return f.queuedTooManyControlFrames
+}
+
+func (f *framer) AddActiveStream(id protocol.StreamID, str sendStreamI) {
+	f.mutex.Lock()
+	if _, ok := f.activeStreams[id]; !ok {
+		f.streamQueue.PushBack(id)
+		f.activeStreams[id] = str
+	}
+	f.mutex.Unlock()
+}
+
+func (f *framer) AddStreamWithControlFrames(id protocol.StreamID, str streamControlFrameGetter) {
+	f.controlFrameMutex.Lock()
+	if _, ok := f.streamsWithControlFrames[id]; !ok {
+		f.streamsWithControlFrames[id] = str
+	}
+	f.controlFrameMutex.Unlock()
+}
+
+// RemoveActiveStream is called when a stream completes.
+func (f *framer) RemoveActiveStream(id protocol.StreamID) {
+	f.mutex.Lock()
+	delete(f.activeStreams, id)
+	// We don't delete the stream from the streamQueue,
+	// since we'd have to iterate over the ringbuffer.
+	// Instead, we check if the stream is still in activeStreams in AppendStreamFrames.
+	f.mutex.Unlock()
+}
+
+func (f *framer) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) {
+	startLen := len(frames)
+	var length protocol.ByteCount
+	f.mutex.Lock()
+	// pop STREAM frames, until less than 128 bytes are left in the packet
+	numActiveStreams := f.streamQueue.Len()
+	for i := 0; i < numActiveStreams; i++ {
+		if protocol.MinStreamFrameSize+length > maxLen {
+			break
+		}
+		id := f.streamQueue.PopFront()
+		// This should never return an error. Better check it anyway.
+		// The stream will only be in the streamQueue if it enqueued itself there.
+		str, ok := f.activeStreams[id]
+		// The stream might have been removed after being enqueued.
+		if !ok {
+			continue
+		}
+		remainingLen := maxLen - length
+		// For the last STREAM frame, we'll remove the DataLen field later.
+		// Therefore, we can pretend to have more bytes available when popping
+		// the STREAM frame (which will always have the DataLen set).
+		remainingLen += protocol.ByteCount(quicvarint.Len(uint64(remainingLen)))
+		frame, ok, hasMoreData := str.popStreamFrame(remainingLen, v)
+		if hasMoreData { // put the stream back in the queue (at the end)
+			f.streamQueue.PushBack(id)
+		} else { // no more data to send. Stream is not active
+			delete(f.activeStreams, id)
+		}
+		// The frame can be "nil"
+		// * if the stream was canceled after it said it had data
+		// * if the remaining size doesn't allow us to add another STREAM frame
+		if !ok {
+			continue
+		}
+		frames = append(frames, frame)
+		length += frame.Frame.Length(v)
+	}
+	f.mutex.Unlock()
+	if len(frames) > startLen {
+		l := frames[len(frames)-1].Frame.Length(v)
+		// account for the smaller size of the last STREAM frame
+		frames[len(frames)-1].Frame.DataLenPresent = false
+		length += frames[len(frames)-1].Frame.Length(v) - l
+	}
+	return frames, length
+}
+
+func (f *framer) Handle0RTTRejection() {
+	f.mutex.Lock()
+	defer f.mutex.Unlock()
+	f.controlFrameMutex.Lock()
+	defer f.controlFrameMutex.Unlock()
+
+	f.streamQueue.Clear()
+	for id := range f.activeStreams {
+		delete(f.activeStreams, id)
+	}
+	var j int
+	for i, frame := range f.controlFrames {
+		switch frame.(type) {
+		case *wire.MaxDataFrame, *wire.MaxStreamDataFrame, *wire.MaxStreamsFrame,
+			*wire.DataBlockedFrame, *wire.StreamDataBlockedFrame, *wire.StreamsBlockedFrame:
+			continue
+		default:
+			f.controlFrames[j] = f.controlFrames[i]
+			j++
+		}
+	}
+	f.controlFrames = slices.Delete(f.controlFrames, j, len(f.controlFrames))
+}
diff --git a/vendor/github.com/quic-go/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go
new file mode 100644
index 00000000..2071b596
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/interface.go
@@ -0,0 +1,371 @@
+package quic
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"io"
+	"net"
+	"time"
+
+	"github.com/quic-go/quic-go/internal/handshake"
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/logging"
+)
+
+// The StreamID is the ID of a QUIC stream.
+type StreamID = protocol.StreamID
+
+// A Version is a QUIC version number.
+type Version = protocol.Version
+
+const (
+	// Version1 is RFC 9000
+	Version1 = protocol.Version1
+	// Version2 is RFC 9369
+	Version2 = protocol.Version2
+)
+
+// A ClientToken is a token received by the client.
+// It can be used to skip address validation on future connection attempts.
+type ClientToken struct {
+	data []byte
+}
+
+type TokenStore interface {
+	// Pop searches for a ClientToken associated with the given key.
+	// Since tokens are not supposed to be reused, it must remove the token from the cache.
+	// It returns nil when no token is found.
+	Pop(key string) (token *ClientToken)
+
+	// Put adds a token to the cache with the given key. It might get called
+	// multiple times in a connection.
+	Put(key string, token *ClientToken)
+}
+
+// Err0RTTRejected is the error returned from:
+// * Open{Uni}Stream{Sync}
+// * Accept{Uni}Stream
+// * Stream.Read and Stream.Write
+// when the server rejects a 0-RTT connection attempt.
+var Err0RTTRejected = errors.New("0-RTT rejected")
+
+// ConnectionTracingKey can be used to associate a ConnectionTracer with a Connection.
+// It is set on the Connection.Context() context,
+// as well as on the context passed to logging.Tracer.NewConnectionTracer.
+// Deprecated: Applications can set their own tracing key using Transport.ConnContext.
+var ConnectionTracingKey = connTracingCtxKey{}
+
+// ConnectionTracingID is the type of the context value saved under the ConnectionTracingKey.
+// Deprecated: Applications can set their own tracing key using Transport.ConnContext.
+type ConnectionTracingID uint64
+
+type connTracingCtxKey struct{}
+
+// QUICVersionContextKey can be used to find out the QUIC version of a TLS handshake from the
+// context returned by tls.Config.ClientHelloInfo.Context.
+var QUICVersionContextKey = handshake.QUICVersionContextKey
+
+// Stream is the interface implemented by QUIC streams.
+// In addition to the errors listed on the Connection,
+// calls to stream functions can return a StreamError if the stream is canceled.
+type Stream interface {
+	ReceiveStream
+	SendStream
+	// SetDeadline sets the read and write deadlines associated
+	// with the connection. It is equivalent to calling both
+	// SetReadDeadline and SetWriteDeadline.
+	SetDeadline(t time.Time) error
+}
+
+// A ReceiveStream is a unidirectional Receive Stream.
+type ReceiveStream interface {
+	// StreamID returns the stream ID.
+	StreamID() StreamID
+	// Read reads data from the stream.
+	// Read can be made to time out and return a net.Error with Timeout() == true
+	// after a fixed time limit; see SetDeadline and SetReadDeadline.
+	// If the stream was canceled by the peer, the error is a StreamError and
+	// Remote == true.
+	// If the connection was closed due to a timeout, the error satisfies
+	// the net.Error interface, and Timeout() will be true.
+	io.Reader
+	// CancelRead aborts receiving on this stream.
+	// It will ask the peer to stop transmitting stream data.
+	// Read will unblock immediately, and future Read calls will fail.
+	// When called multiple times, or after io.EOF has been read, it is a no-op.
+	CancelRead(StreamErrorCode)
+	// SetReadDeadline sets the deadline for future Read calls and
+	// any currently-blocked Read call.
+	// A zero value for t means Read will not time out.
+	SetReadDeadline(t time.Time) error
+}
+
+// A SendStream is a unidirectional Send Stream.
+type SendStream interface {
+	// StreamID returns the stream ID.
+	StreamID() StreamID
+	// Write writes data to the stream.
+	// Write can be made to time out and return a net.Error with Timeout() == true
+	// after a fixed time limit; see SetDeadline and SetWriteDeadline.
+	// If the stream was canceled by the peer, the error is a StreamError and
+	// Remote == true.
+	// If the connection was closed due to a timeout, the error satisfies
+	// the net.Error interface, and Timeout() will be true.
+	io.Writer
+	// Close closes the write-direction of the stream.
+	// Future calls to Write are not permitted after calling Close.
+	// It must not be called concurrently with Write.
+	// It must not be called after calling CancelWrite.
+	io.Closer
+	// CancelWrite aborts sending on this stream.
+	// Data already written, but not yet delivered to the peer, is not guaranteed to be delivered reliably.
+	// Write will unblock immediately, and future calls to Write will fail.
+	// When called multiple times it is a no-op.
+	// When called after Close, it aborts delivery. Note that there is no guarantee whether
+	// the peer will receive the FIN or the reset first.
+	CancelWrite(StreamErrorCode)
+	// The Context is canceled as soon as the write-side of the stream is closed.
+	// This happens when Close() or CancelWrite() is called, or when the peer
+	// cancels the read-side of their stream.
+	// The cancellation cause is set to the error that caused the stream to
+	// close, or `context.Canceled` in case the stream is closed without error.
+	Context() context.Context
+	// SetWriteDeadline sets the deadline for future Write calls
+	// and any currently-blocked Write call.
+ // Even if write times out, it may return n > 0, indicating that + // some data was successfully written. + // A zero value for t means Write will not time out. + SetWriteDeadline(t time.Time) error +} + +// A Connection is a QUIC connection between two peers. +// Calls to the connection (and to streams) can return the following types of errors: +// * ApplicationError: for errors triggered by the application running on top of QUIC +// * TransportError: for errors triggered by the QUIC transport (in many cases a misbehaving peer) +// * IdleTimeoutError: when the peer goes away unexpectedly (this is a net.Error timeout error) +// * HandshakeTimeoutError: when the cryptographic handshake takes too long (this is a net.Error timeout error) +// * StatelessResetError: when we receive a stateless reset +// * VersionNegotiationError: returned by the client, when there's no version overlap between the peers +type Connection interface { + // AcceptStream returns the next stream opened by the peer, blocking until one is available. + // If the connection was closed due to a timeout, the error satisfies + // the net.Error interface, and Timeout() will be true. + AcceptStream(context.Context) (Stream, error) + // AcceptUniStream returns the next unidirectional stream opened by the peer, blocking until one is available. + // If the connection was closed due to a timeout, the error satisfies + // the net.Error interface, and Timeout() will be true. + AcceptUniStream(context.Context) (ReceiveStream, error) + // OpenStream opens a new bidirectional QUIC stream. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + // When reaching the peer's stream limit, it is not possible to open a new stream until the + // peer raises the stream limit. In that case, a StreamLimitReachedError is returned. + OpenStream() (Stream, error) + // OpenStreamSync opens a new bidirectional QUIC stream. + // It blocks until a new stream can be opened. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + OpenStreamSync(context.Context) (Stream, error) + // OpenUniStream opens a new outgoing unidirectional QUIC stream. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + // When reaching the peer's stream limit, it is not possible to open a new stream until the + // peer raises the stream limit. In that case, a StreamLimitReachedError is returned. + OpenUniStream() (SendStream, error) + // OpenUniStreamSync opens a new outgoing unidirectional QUIC stream. + // It blocks until a new stream can be opened. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + OpenUniStreamSync(context.Context) (SendStream, error) + // LocalAddr returns the local address. + LocalAddr() net.Addr + // RemoteAddr returns the address of the peer. + RemoteAddr() net.Addr + // CloseWithError closes the connection with an error. + // The error string will be sent to the peer. + CloseWithError(ApplicationErrorCode, string) error + // Context returns a context that is cancelled when the connection is closed. 
+	// The cancellation cause is set to the error that caused the connection to
+	// close, or `context.Canceled` in case the listener is closed first.
+	Context() context.Context
+	// ConnectionState returns basic details about the QUIC connection.
+	// Warning: This API should not be considered stable and might change soon.
+	ConnectionState() ConnectionState
+
+	// SendDatagram sends a message using a QUIC datagram, as specified in RFC 9221.
+	// There is no delivery guarantee for DATAGRAM frames; they are not retransmitted if lost.
+	// The payload of the datagram needs to fit into a single QUIC packet.
+	// In addition, a datagram may be dropped before being sent out if the available packet size suddenly decreases.
+	// If the payload is too large to be sent at the current time, a DatagramTooLargeError is returned.
+	SendDatagram(payload []byte) error
+	// ReceiveDatagram gets a message received in a datagram, as specified in RFC 9221.
+	ReceiveDatagram(context.Context) ([]byte, error)
+}
+
+// An EarlyConnection is a connection that is handshaking.
+// Data sent during the handshake is encrypted using the forward secure keys.
+// When using client certificates, the client's identity is only verified
+// after completion of the handshake.
+type EarlyConnection interface {
+	Connection
+
+	// HandshakeComplete blocks until the handshake completes (or fails).
+	// For the client, data sent before completion of the handshake is encrypted with 0-RTT keys.
+	// For the server, data sent before completion of the handshake is encrypted with 1-RTT keys,
+	// however the client's identity is only verified once the handshake completes.
+	HandshakeComplete() <-chan struct{}
+
+	NextConnection(context.Context) (Connection, error)
+}
+
+// StatelessResetKey is a key used to derive stateless reset tokens.
+type StatelessResetKey [32]byte
+
+// TokenGeneratorKey is a key used to encrypt session resumption tokens.
+type TokenGeneratorKey = handshake.TokenProtectorKey
+
+// A ConnectionID is a QUIC Connection ID, as defined in RFC 9000.
+// It is not able to handle QUIC Connection IDs longer than 20 bytes,
+// as they are allowed by RFC 8999.
+type ConnectionID = protocol.ConnectionID
+
+// ConnectionIDFromBytes interprets b as a Connection ID. It panics if b is
+// longer than 20 bytes.
+func ConnectionIDFromBytes(b []byte) ConnectionID {
+	return protocol.ParseConnectionID(b)
+}
+
+// A ConnectionIDGenerator is an interface that allows clients to implement their own format
+// for the Connection IDs that servers/clients use as SrcConnectionID in QUIC packets.
+//
+// Implementations should always produce Connection IDs of constant size.
+type ConnectionIDGenerator interface {
+	// GenerateConnectionID generates a new ConnectionID.
+	// Generated ConnectionIDs should be unique and observers should not be able to correlate two ConnectionIDs.
+	GenerateConnectionID() (ConnectionID, error)
+
+	// ConnectionIDLen returns the length of the ConnectionIDs generated by the implementation of
+	// this interface.
+	// Effectively, this means that implementations of ConnectionIDGenerator must always return constant-size
+	// connection IDs. Valid lengths are between 0 and 20, and calls to GenerateConnectionID must return IDs
+	// of exactly that length.
+	// 0-length Connection IDs can be used when an endpoint (server or client) does not require multiplexing connections
+	// in the presence of a connection migration environment.
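+	// A minimal sketch of a conforming implementation (assuming crypto/rand for randomness):
+	//
+	//	type gen8 struct{}
+	//
+	//	func (gen8) GenerateConnectionID() (quic.ConnectionID, error) {
+	//		b := make([]byte, 8)
+	//		if _, err := rand.Read(b); err != nil {
+	//			return quic.ConnectionID{}, err
+	//		}
+	//		return quic.ConnectionIDFromBytes(b), nil
+	//	}
+	//
+	//	func (gen8) ConnectionIDLen() int { return 8 }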
+	ConnectionIDLen() int
+}
+
+// Config contains all configuration data needed for a QUIC server or client.
+type Config struct {
+	// GetConfigForClient is called for incoming connections.
+	// If the error is not nil, the connection attempt is refused.
+	GetConfigForClient func(info *ClientHelloInfo) (*Config, error)
+	// The QUIC versions that can be negotiated.
+	// If not set, it uses all versions available.
+	Versions []Version
+	// HandshakeIdleTimeout is the idle timeout before completion of the handshake.
+	// If we don't receive any packet from the peer within this time, the connection attempt is aborted.
+	// Additionally, if the handshake doesn't complete in twice this time, the connection attempt is also aborted.
+	// If this value is zero, the timeout is set to 5 seconds.
+	HandshakeIdleTimeout time.Duration
+	// MaxIdleTimeout is the maximum duration that may pass without any incoming network activity.
+	// The actual value for the idle timeout is the minimum of this value and the peer's.
+	// This value only applies after the handshake has completed.
+	// If the timeout is exceeded, the connection is closed.
+	// If this value is zero, the timeout is set to 30 seconds.
+	MaxIdleTimeout time.Duration
+	// The TokenStore stores tokens received from the server.
+	// Tokens are used to skip address validation on future connection attempts.
+	// The key used to store tokens is the ServerName from the tls.Config, if set;
+	// otherwise the token is associated with the server's IP address.
+	TokenStore TokenStore
+	// InitialStreamReceiveWindow is the initial size of the stream-level flow control window for receiving data.
+	// If the application is consuming data quickly enough, the flow control auto-tuning algorithm
+	// will increase the window up to MaxStreamReceiveWindow.
+	// If this value is zero, it will default to 512 KB.
+	// Values larger than the maximum varint (quicvarint.Max) will be clipped to that value.
+	InitialStreamReceiveWindow uint64
+	// MaxStreamReceiveWindow is the maximum stream-level flow control window for receiving data.
+	// If this value is zero, it will default to 6 MB.
+	// Values larger than the maximum varint (quicvarint.Max) will be clipped to that value.
+	MaxStreamReceiveWindow uint64
+	// InitialConnectionReceiveWindow is the initial size of the connection-level flow control window for receiving data.
+	// If the application is consuming data quickly enough, the flow control auto-tuning algorithm
+	// will increase the window up to MaxConnectionReceiveWindow.
+	// If this value is zero, it will default to 512 KB.
+	// Values larger than the maximum varint (quicvarint.Max) will be clipped to that value.
+	InitialConnectionReceiveWindow uint64
+	// MaxConnectionReceiveWindow is the connection-level flow control window for receiving data.
+	// If this value is zero, it will default to 15 MB.
+	// Values larger than the maximum varint (quicvarint.Max) will be clipped to that value.
+	MaxConnectionReceiveWindow uint64
+	// AllowConnectionWindowIncrease is called every time the connection flow controller attempts
+	// to increase the connection flow control window.
+	// If set, the caller can prevent an increase of the window. Typically, it would do so to
+	// limit the memory usage.
+	// To avoid deadlocks, it is not valid to call other functions on the connection or on streams
+	// in this callback.
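+	// A sketch of a process-wide cap (cfg is an assumed *Config; releasing the budget when
+	// connections close is ignored here):
+	//
+	//	var total atomic.Int64
+	//	cfg.AllowConnectionWindowIncrease = func(_ quic.Connection, delta uint64) bool {
+	//		return total.Add(int64(delta)) <= 512<<20
+	//	}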
+ AllowConnectionWindowIncrease func(conn Connection, delta uint64) bool + // MaxIncomingStreams is the maximum number of concurrent bidirectional streams that a peer is allowed to open. + // If not set, it will default to 100. + // If set to a negative value, it doesn't allow any bidirectional streams. + // Values larger than 2^60 will be clipped to that value. + MaxIncomingStreams int64 + // MaxIncomingUniStreams is the maximum number of concurrent unidirectional streams that a peer is allowed to open. + // If not set, it will default to 100. + // If set to a negative value, it doesn't allow any unidirectional streams. + // Values larger than 2^60 will be clipped to that value. + MaxIncomingUniStreams int64 + // KeepAlivePeriod defines whether this peer will periodically send a packet to keep the connection alive. + // If set to 0, then no keep alive is sent. Otherwise, the keep alive is sent on that period (or at most + // every half of MaxIdleTimeout, whichever is smaller). + KeepAlivePeriod time.Duration + // InitialPacketSize is the initial size of packets sent. + // It is usually not necessary to manually set this value, + // since Path MTU discovery very quickly finds the path's MTU. + // If set too high, the path might not support packets that large, leading to a timeout of the QUIC handshake. + // Values below 1200 are invalid. + InitialPacketSize uint16 + // DisablePathMTUDiscovery disables Path MTU Discovery (RFC 8899). + // This allows the sending of QUIC packets that fully utilize the available MTU of the path. + // Path MTU discovery is only available on systems that allow setting of the Don't Fragment (DF) bit. + DisablePathMTUDiscovery bool + // Allow0RTT allows the application to decide if a 0-RTT connection attempt should be accepted. + // Only valid for the server. + Allow0RTT bool + // Enable QUIC datagram support (RFC 9221). + EnableDatagrams bool + Tracer func(context.Context, logging.Perspective, ConnectionID) *logging.ConnectionTracer +} + +// ClientHelloInfo contains information about an incoming connection attempt. +type ClientHelloInfo struct { + // RemoteAddr is the remote address on the Initial packet. + // Unless AddrVerified is set, the address is not yet verified, and could be a spoofed IP address. + RemoteAddr net.Addr + // AddrVerified says if the remote address was verified using QUIC's Retry mechanism. + // Note that the Retry mechanism costs one network roundtrip, + // and is not performed unless Transport.MaxUnvalidatedHandshakes is surpassed. + AddrVerified bool +} + +// ConnectionState records basic details about a QUIC connection +type ConnectionState struct { + // TLS contains information about the TLS connection state, incl. the tls.ConnectionState. + TLS tls.ConnectionState + // SupportsDatagrams says if support for QUIC datagrams (RFC 9221) was negotiated. + // This requires both nodes to support and enable the datagram extensions (via Config.EnableDatagrams). + // If datagram support was negotiated, datagrams can be sent and received using the + // SendDatagram and ReceiveDatagram methods on the Connection. + SupportsDatagrams bool + // Used0RTT says if 0-RTT resumption was used. + Used0RTT bool + // Version is the QUIC version of the QUIC connection. 
+	Version Version
+	// GSO says if generic segmentation offload is used.
+	GSO bool
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go
new file mode 100644
index 00000000..34506b12
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go
@@ -0,0 +1,20 @@
+package ackhandler
+
+import "github.com/quic-go/quic-go/internal/wire"
+
+// IsFrameAckEliciting returns true if the frame is ack-eliciting.
+func IsFrameAckEliciting(f wire.Frame) bool {
+	_, isAck := f.(*wire.AckFrame)
+	_, isConnectionClose := f.(*wire.ConnectionCloseFrame)
+	return !isAck && !isConnectionClose
+}
+
+// HasAckElicitingFrames returns true if at least one frame is ack-eliciting.
+func HasAckElicitingFrames(fs []Frame) bool {
+	for _, f := range fs {
+		if IsFrameAckEliciting(f.Frame) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
new file mode 100644
index 00000000..6f890b4d
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
@@ -0,0 +1,24 @@
+package ackhandler
+
+import (
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/utils"
+	"github.com/quic-go/quic-go/logging"
+)
+
+// NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler.
+// clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
+// clientAddressValidated has no effect for a client.
+func NewAckHandler(
+	initialPacketNumber protocol.PacketNumber,
+	initialMaxDatagramSize protocol.ByteCount,
+	rttStats *utils.RTTStats,
+	clientAddressValidated bool,
+	enableECN bool,
+	pers protocol.Perspective,
+	tracer *logging.ConnectionTracer,
+	logger utils.Logger,
+) (SentPacketHandler, ReceivedPacketHandler) {
+	sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, clientAddressValidated, enableECN, pers, tracer, logger)
+	return sph, newReceivedPacketHandler(sph, logger)
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/ecn.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ecn.go
new file mode 100644
index 00000000..68415ac6
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ecn.go
@@ -0,0 +1,296 @@
+package ackhandler
+
+import (
+	"fmt"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/utils"
+	"github.com/quic-go/quic-go/logging"
+)
+
+type ecnState uint8
+
+const (
+	ecnStateInitial ecnState = iota
+	ecnStateTesting
+	ecnStateUnknown
+	ecnStateCapable
+	ecnStateFailed
+)
+
+// must fit into an uint8, otherwise numSentTesting and numLostTesting must have a larger type
+const numECNTestingPackets = 10
+
+type ecnHandler interface {
+	SentPacket(protocol.PacketNumber, protocol.ECN)
+	Mode() protocol.ECN
+	HandleNewlyAcked(packets []*packet, ect0, ect1, ecnce int64) (congested bool)
+	LostPacket(protocol.PacketNumber)
+}
+
+// The ecnTracker performs ECN validation of a path.
+// Once failed, it doesn't do any re-validation of the path.
+// It is designed to only work for 1-RTT packets; it doesn't handle multiple packet number spaces.
+// In order to avoid revealing any internal state to on-path observers,
+// callers should make sure to start using ECN (i.e. calling Mode) for the very first 1-RTT packet sent.
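+// Illustrative sketch, not upstream source: the state machine implemented by
+// Mode, SentPacket, HandleNewlyAcked and LostPacket below:
+//
+//	ecnStateInitial --(first call to Mode)--------------------> ecnStateTesting
+//	ecnStateTesting --(10 ECN-marked packets sent)------------> ecnStateUnknown
+//	ecnStateTesting/Unknown --(testing packet acked with
+//	                           plausible ECT counts)----------> ecnStateCapable
+//	any state --(implausible counts, bleaching, mangling,
+//	             or all testing packets lost)-----------------> ecnStateFailed (terminal)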
+// The validation logic implemented here strictly follows the algorithm described in RFC 9000 section 13.4.2 and A.4.
+type ecnTracker struct {
+	state                          ecnState
+	numSentTesting, numLostTesting uint8
+
+	firstTestingPacket protocol.PacketNumber
+	lastTestingPacket  protocol.PacketNumber
+	firstCapablePacket protocol.PacketNumber
+
+	numSentECT0, numSentECT1                  int64
+	numAckedECT0, numAckedECT1, numAckedECNCE int64
+
+	tracer *logging.ConnectionTracer
+	logger utils.Logger
+}
+
+var _ ecnHandler = &ecnTracker{}
+
+func newECNTracker(logger utils.Logger, tracer *logging.ConnectionTracer) *ecnTracker {
+	return &ecnTracker{
+		firstTestingPacket: protocol.InvalidPacketNumber,
+		lastTestingPacket:  protocol.InvalidPacketNumber,
+		firstCapablePacket: protocol.InvalidPacketNumber,
+		state:              ecnStateInitial,
+		logger:             logger,
+		tracer:             tracer,
+	}
+}
+
+func (e *ecnTracker) SentPacket(pn protocol.PacketNumber, ecn protocol.ECN) {
+	//nolint:exhaustive // These are the only ones we need to take care of.
+	switch ecn {
+	case protocol.ECNNon:
+		return
+	case protocol.ECT0:
+		e.numSentECT0++
+	case protocol.ECT1:
+		e.numSentECT1++
+	case protocol.ECNUnsupported:
+		if e.state != ecnStateFailed {
+			panic("didn't expect ECN to be unsupported")
+		}
+	default:
+		panic(fmt.Sprintf("sent packet with unexpected ECN marking: %s", ecn))
+	}
+
+	if e.state == ecnStateCapable && e.firstCapablePacket == protocol.InvalidPacketNumber {
+		e.firstCapablePacket = pn
+	}
+
+	if e.state != ecnStateTesting {
+		return
+	}
+
+	e.numSentTesting++
+	if e.firstTestingPacket == protocol.InvalidPacketNumber {
+		e.firstTestingPacket = pn
+	}
+	if e.numSentECT0+e.numSentECT1 >= numECNTestingPackets {
+		if e.tracer != nil && e.tracer.ECNStateUpdated != nil {
+			e.tracer.ECNStateUpdated(logging.ECNStateUnknown, logging.ECNTriggerNoTrigger)
+		}
+		e.state = ecnStateUnknown
+		e.lastTestingPacket = pn
+	}
+}
+
+func (e *ecnTracker) Mode() protocol.ECN {
+	switch e.state {
+	case ecnStateInitial:
+		if e.tracer != nil && e.tracer.ECNStateUpdated != nil {
+			e.tracer.ECNStateUpdated(logging.ECNStateTesting, logging.ECNTriggerNoTrigger)
+		}
+		e.state = ecnStateTesting
+		return e.Mode()
+	case ecnStateTesting, ecnStateCapable:
+		return protocol.ECT0
+	case ecnStateUnknown, ecnStateFailed:
+		return protocol.ECNNon
+	default:
+		panic(fmt.Sprintf("unknown ECN state: %d", e.state))
+	}
+}
+
+func (e *ecnTracker) LostPacket(pn protocol.PacketNumber) {
+	if e.state != ecnStateTesting && e.state != ecnStateUnknown {
+		return
+	}
+	if !e.isTestingPacket(pn) {
+		return
+	}
+	e.numLostTesting++
+	// Only proceed if we have sent all 10 testing packets.
+	if e.state != ecnStateUnknown {
+		return
+	}
+	if e.numLostTesting >= e.numSentTesting {
+		e.logger.Debugf("Disabling ECN. All testing packets were lost.")
+		if e.tracer != nil && e.tracer.ECNStateUpdated != nil {
+			e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedLostAllTestingPackets)
+		}
+		e.state = ecnStateFailed
+		return
+	}
+	// Path validation also fails if some testing packets are lost, and all other testing packets were CE-marked.
+	e.failIfMangled()
+}
+
+// HandleNewlyAcked handles the ECN counts on an ACK frame.
+// It must only be called for ACK frames that increase the largest acknowledged packet number,
+// see section 13.4.2.1 of RFC 9000.
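+// Illustrative worked example, not upstream source: suppose 10 packets were
+// sent as ECT(0) (numSentECT0 == 10), this ACK newly acknowledges 5 of them
+// (ackedECT0 == 5), and no counts were acked before:
+//
+//	ect0=3, ect1=0, ecnce=1: newECT0+newECNCE = 4 < 5 -> validation fails
+//	  (the path bleached at least one ECT(0) mark)
+//	ect0=4, ect1=0, ecnce=1: newECT0+newECNCE = 5    -> validation passes;
+//	  once in ecnStateCapable, a positive newECNCE is reported as congestion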
+func (e *ecnTracker) HandleNewlyAcked(packets []*packet, ect0, ect1, ecnce int64) (congested bool) { + if e.state == ecnStateFailed { + return false + } + + // ECN validation can fail if the received total count for either ECT(0) or ECT(1) exceeds + // the total number of packets sent with each corresponding ECT codepoint. + if ect0 > e.numSentECT0 || ect1 > e.numSentECT1 { + e.logger.Debugf("Disabling ECN. Received more ECT(0) / ECT(1) acknowledgements than packets sent.") + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedMoreECNCountsThanSent) + } + e.state = ecnStateFailed + return false + } + + // Count ECT0 and ECT1 marks that we used when sending the packets that are now being acknowledged. + var ackedECT0, ackedECT1 int64 + for _, p := range packets { + //nolint:exhaustive // We only ever send ECT(0) and ECT(1). + switch e.ecnMarking(p.PacketNumber) { + case protocol.ECT0: + ackedECT0++ + case protocol.ECT1: + ackedECT1++ + } + } + + // If an ACK frame newly acknowledges a packet that the endpoint sent with either the ECT(0) or ECT(1) + // codepoint set, ECN validation fails if the corresponding ECN counts are not present in the ACK frame. + // This check detects: + // * paths that bleach all ECN marks, and + // * peers that don't report any ECN counts + if (ackedECT0 > 0 || ackedECT1 > 0) && ect0 == 0 && ect1 == 0 && ecnce == 0 { + e.logger.Debugf("Disabling ECN. ECN-marked packet acknowledged, but no ECN counts on ACK frame.") + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedNoECNCounts) + } + e.state = ecnStateFailed + return false + } + + // Determine the increase in ECT0, ECT1 and ECNCE marks + newECT0 := ect0 - e.numAckedECT0 + newECT1 := ect1 - e.numAckedECT1 + newECNCE := ecnce - e.numAckedECNCE + + // We're only processing ACKs that increase the Largest Acked. + // Therefore, the ECN counters should only ever increase. + // Any decrease means that the peer's counting logic is broken. + if newECT0 < 0 || newECT1 < 0 || newECNCE < 0 { + e.logger.Debugf("Disabling ECN. ECN counts decreased unexpectedly.") + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedDecreasedECNCounts) + } + e.state = ecnStateFailed + return false + } + + // ECN validation also fails if the sum of the increase in ECT(0) and ECN-CE counts is less than the number + // of newly acknowledged packets that were originally sent with an ECT(0) marking. + // This could be the result of (partial) bleaching. + if newECT0+newECNCE < ackedECT0 { + e.logger.Debugf("Disabling ECN. Received less ECT(0) + ECN-CE than packets sent with ECT(0).") + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedTooFewECNCounts) + } + e.state = ecnStateFailed + return false + } + // Similarly, ECN validation fails if the sum of the increases to ECT(1) and ECN-CE counts is less than + // the number of newly acknowledged packets sent with an ECT(1) marking. + if newECT1+newECNCE < ackedECT1 { + e.logger.Debugf("Disabling ECN. 
Received less ECT(1) + ECN-CE than packets sent with ECT(1).") + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedTooFewECNCounts) + } + e.state = ecnStateFailed + return false + } + + // update our counters + e.numAckedECT0 = ect0 + e.numAckedECT1 = ect1 + e.numAckedECNCE = ecnce + + // Detect mangling (a path remarking all ECN-marked testing packets as CE), + // once all 10 testing packets have been sent out. + if e.state == ecnStateUnknown { + e.failIfMangled() + if e.state == ecnStateFailed { + return false + } + } + if e.state == ecnStateTesting || e.state == ecnStateUnknown { + var ackedTestingPacket bool + for _, p := range packets { + if e.isTestingPacket(p.PacketNumber) { + ackedTestingPacket = true + break + } + } + // This check won't succeed if the path is mangling ECN-marks (i.e. rewrites all ECN-marked packets to CE). + if ackedTestingPacket && (newECT0 > 0 || newECT1 > 0) { + e.logger.Debugf("ECN capability confirmed.") + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateCapable, logging.ECNTriggerNoTrigger) + } + e.state = ecnStateCapable + } + } + + // Don't trust CE marks before having confirmed ECN capability of the path. + // Otherwise, mangling would be misinterpreted as actual congestion. + return e.state == ecnStateCapable && newECNCE > 0 +} + +// failIfMangled fails ECN validation if all testing packets are lost or CE-marked. +func (e *ecnTracker) failIfMangled() { + numAckedECNCE := e.numAckedECNCE + int64(e.numLostTesting) + if e.numSentECT0+e.numSentECT1 > numAckedECNCE { + return + } + if e.tracer != nil && e.tracer.ECNStateUpdated != nil { + e.tracer.ECNStateUpdated(logging.ECNStateFailed, logging.ECNFailedManglingDetected) + } + e.state = ecnStateFailed +} + +func (e *ecnTracker) ecnMarking(pn protocol.PacketNumber) protocol.ECN { + if pn < e.firstTestingPacket || e.firstTestingPacket == protocol.InvalidPacketNumber { + return protocol.ECNNon + } + if pn < e.lastTestingPacket || e.lastTestingPacket == protocol.InvalidPacketNumber { + return protocol.ECT0 + } + if pn < e.firstCapablePacket || e.firstCapablePacket == protocol.InvalidPacketNumber { + return protocol.ECNNon + } + // We don't need to deal with the case when ECN validation fails, + // since we're ignoring any ECN counts reported in ACK frames in that case. + return protocol.ECT0 +} + +func (e *ecnTracker) isTestingPacket(pn protocol.PacketNumber) bool { + if e.firstTestingPacket == protocol.InvalidPacketNumber { + return false + } + return pn >= e.firstTestingPacket && (pn <= e.lastTestingPacket || e.lastTestingPacket == protocol.InvalidPacketNumber) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go new file mode 100644 index 00000000..e03a8080 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go @@ -0,0 +1,21 @@ +package ackhandler + +import ( + "github.com/quic-go/quic-go/internal/wire" +) + +// FrameHandler handles the acknowledgement and the loss of a frame. 
+type FrameHandler interface { + OnAcked(wire.Frame) + OnLost(wire.Frame) +} + +type Frame struct { + Frame wire.Frame // nil if the frame has already been acknowledged in another packet + Handler FrameHandler +} + +type StreamFrame struct { + Frame *wire.StreamFrame + Handler FrameHandler +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go new file mode 100644 index 00000000..ba8cbbda --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go @@ -0,0 +1,53 @@ +package ackhandler + +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +// SentPacketHandler handles ACKs received for outgoing packets +type SentPacketHandler interface { + // SentPacket may modify the packet + SentPacket(t time.Time, pn, largestAcked protocol.PacketNumber, streamFrames []StreamFrame, frames []Frame, encLevel protocol.EncryptionLevel, ecn protocol.ECN, size protocol.ByteCount, isPathMTUProbePacket bool) + // ReceivedAck processes an ACK frame. + // It does not store a copy of the frame. + ReceivedAck(f *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) (bool /* 1-RTT packet acked */, error) + ReceivedBytes(protocol.ByteCount) + DropPackets(protocol.EncryptionLevel) + ResetForRetry(rcvTime time.Time) error + SetHandshakeConfirmed() + + // The SendMode determines if and what kind of packets can be sent. + SendMode(now time.Time) SendMode + // TimeUntilSend is the time when the next packet should be sent. + // It is used for pacing packets. + TimeUntilSend() time.Time + SetMaxDatagramSize(count protocol.ByteCount) + + // only to be called once the handshake is complete + QueueProbePacket(protocol.EncryptionLevel) bool /* was a packet queued */ + + ECNMode(isShortHeaderPacket bool) protocol.ECN // isShortHeaderPacket should only be true for non-coalesced 1-RTT packets + PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) + PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber + + GetLossDetectionTimeout() time.Time + OnLossDetectionTimeout() error +} + +type sentPacketTracker interface { + GetLowestPacketNotConfirmedAcked() protocol.PacketNumber + ReceivedPacket(protocol.EncryptionLevel) +} + +// ReceivedPacketHandler handles ACKs needed to send for incoming packets +type ReceivedPacketHandler interface { + IsPotentiallyDuplicate(protocol.PacketNumber, protocol.EncryptionLevel) bool + ReceivedPacket(pn protocol.PacketNumber, ecn protocol.ECN, encLevel protocol.EncryptionLevel, rcvTime time.Time, ackEliciting bool) error + DropPackets(protocol.EncryptionLevel) + + GetAlarmTimeout() time.Time + GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go new file mode 100644 index 00000000..0031e6b1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go @@ -0,0 +1,9 @@ +//go:build gomock || generate + +package ackhandler + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package ackhandler -destination mock_sent_packet_tracker_test.go github.com/quic-go/quic-go/internal/ackhandler SentPacketTracker" +type SentPacketTracker = sentPacketTracker + +//go:generate sh -c "go run go.uber.org/mock/mockgen 
-build_flags=\"-tags=gomock\" -package ackhandler -destination mock_ecn_handler_test.go github.com/quic-go/quic-go/internal/ackhandler ECNHandler"
+type ECNHandler = ecnHandler
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
new file mode 100644
index 00000000..5f43689b
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
@@ -0,0 +1,55 @@
+package ackhandler
+
+import (
+	"sync"
+	"time"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+)
+
+// A packet tracks the state of a sent packet until it is acknowledged or declared lost.
+type packet struct {
+	SendTime        time.Time
+	PacketNumber    protocol.PacketNumber
+	StreamFrames    []StreamFrame
+	Frames          []Frame
+	LargestAcked    protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK
+	Length          protocol.ByteCount
+	EncryptionLevel protocol.EncryptionLevel
+
+	IsPathMTUProbePacket bool // We don't report the loss of Path MTU probe packets to the congestion controller.
+
+	includedInBytesInFlight bool
+	declaredLost            bool
+	skippedPacket           bool
+}
+
+func (p *packet) outstanding() bool {
+	return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket
+}
+
+var packetPool = sync.Pool{New: func() any { return &packet{} }}
+
+func getPacket() *packet {
+	p := packetPool.Get().(*packet)
+	p.PacketNumber = 0
+	p.StreamFrames = nil
+	p.Frames = nil
+	p.LargestAcked = 0
+	p.Length = 0
+	p.EncryptionLevel = protocol.EncryptionLevel(0)
+	p.SendTime = time.Time{}
+	p.IsPathMTUProbePacket = false
+	p.includedInBytesInFlight = false
+	p.declaredLost = false
+	p.skippedPacket = false
+	return p
+}
+
+// We currently only return Packets back into the pool when they're acknowledged (not when they're lost).
+// This simplifies the code, and gives the vast majority of the performance benefit we can gain from using the pool.
+func putPacket(p *packet) {
+	p.Frames = nil
+	p.StreamFrames = nil
+	packetPool.Put(p)
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go
new file mode 100644
index 00000000..4a9db863
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go
@@ -0,0 +1,84 @@
+package ackhandler
+
+import (
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/utils"
+)
+
+type packetNumberGenerator interface {
+	Peek() protocol.PacketNumber
+	// Pop pops the packet number.
+	// It reports if the packet number (before the one just popped) was skipped.
+	// It never skips more than one packet number in a row.
+	Pop() (skipped bool, _ protocol.PacketNumber)
+}
+
+type sequentialPacketNumberGenerator struct {
+	next protocol.PacketNumber
+}
+
+var _ packetNumberGenerator = &sequentialPacketNumberGenerator{}
+
+func newSequentialPacketNumberGenerator(initial protocol.PacketNumber) packetNumberGenerator {
+	return &sequentialPacketNumberGenerator{next: initial}
+}
+
+func (p *sequentialPacketNumberGenerator) Peek() protocol.PacketNumber {
+	return p.next
+}
+
+func (p *sequentialPacketNumberGenerator) Pop() (bool, protocol.PacketNumber) {
+	next := p.next
+	p.next++
+	return false, next
+}
+
+// The skippingPacketNumberGenerator generates the packet number for the next packet.
+// It randomly skips a packet number every averagePeriod packets (on average).
+// It is guaranteed to never skip two consecutive packet numbers.
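+// Illustrative sketch, not upstream source: the skipped numbers defend against
+// optimistic ACKs, since acknowledging a number that was never sent is treated
+// as a PROTOCOL_VIOLATION in detectAndRemoveAckedPackets. Made-up periods:
+//
+//	gen := newSkippingPacketNumberGenerator(0, 16, 128)
+//	skipped, pn := gen.Pop()
+//	if skipped {
+//		// pn-1 was deliberately left unused; the peer must never ack it
+//	}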
+type skippingPacketNumberGenerator struct { + period protocol.PacketNumber + maxPeriod protocol.PacketNumber + + next protocol.PacketNumber + nextToSkip protocol.PacketNumber + + rng utils.Rand +} + +var _ packetNumberGenerator = &skippingPacketNumberGenerator{} + +func newSkippingPacketNumberGenerator(initial, initialPeriod, maxPeriod protocol.PacketNumber) packetNumberGenerator { + g := &skippingPacketNumberGenerator{ + next: initial, + period: initialPeriod, + maxPeriod: maxPeriod, + } + g.generateNewSkip() + return g +} + +func (p *skippingPacketNumberGenerator) Peek() protocol.PacketNumber { + if p.next == p.nextToSkip { + return p.next + 1 + } + return p.next +} + +func (p *skippingPacketNumberGenerator) Pop() (bool, protocol.PacketNumber) { + next := p.next + if p.next == p.nextToSkip { + next++ + p.next += 2 + p.generateNewSkip() + return true, next + } + p.next++ // generate a new packet number for the next packet + return false, next +} + +func (p *skippingPacketNumberGenerator) generateNewSkip() { + // make sure that there are never two consecutive packet numbers that are skipped + p.nextToSkip = p.next + 3 + protocol.PacketNumber(p.rng.Int31n(int32(2*p.period))) + p.period = min(2*p.period, p.maxPeriod) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go new file mode 100644 index 00000000..1175c790 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go @@ -0,0 +1,125 @@ +package ackhandler + +import ( + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" +) + +type receivedPacketHandler struct { + sentPackets sentPacketTracker + + initialPackets *receivedPacketTracker + handshakePackets *receivedPacketTracker + appDataPackets appDataReceivedPacketTracker + + lowest1RTTPacket protocol.PacketNumber +} + +var _ ReceivedPacketHandler = &receivedPacketHandler{} + +func newReceivedPacketHandler(sentPackets sentPacketTracker, logger utils.Logger) ReceivedPacketHandler { + return &receivedPacketHandler{ + sentPackets: sentPackets, + initialPackets: newReceivedPacketTracker(), + handshakePackets: newReceivedPacketTracker(), + appDataPackets: *newAppDataReceivedPacketTracker(logger), + lowest1RTTPacket: protocol.InvalidPacketNumber, + } +} + +func (h *receivedPacketHandler) ReceivedPacket( + pn protocol.PacketNumber, + ecn protocol.ECN, + encLevel protocol.EncryptionLevel, + rcvTime time.Time, + ackEliciting bool, +) error { + h.sentPackets.ReceivedPacket(encLevel) + switch encLevel { + case protocol.EncryptionInitial: + return h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting) + case protocol.EncryptionHandshake: + // The Handshake packet number space might already have been dropped as a result + // of processing the CRYPTO frame that was contained in this packet. 
+ if h.handshakePackets == nil { + return nil + } + return h.handshakePackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting) + case protocol.Encryption0RTT: + if h.lowest1RTTPacket != protocol.InvalidPacketNumber && pn > h.lowest1RTTPacket { + return fmt.Errorf("received packet number %d on a 0-RTT packet after receiving %d on a 1-RTT packet", pn, h.lowest1RTTPacket) + } + return h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting) + case protocol.Encryption1RTT: + if h.lowest1RTTPacket == protocol.InvalidPacketNumber || pn < h.lowest1RTTPacket { + h.lowest1RTTPacket = pn + } + if err := h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting); err != nil { + return err + } + h.appDataPackets.IgnoreBelow(h.sentPackets.GetLowestPacketNotConfirmedAcked()) + return nil + default: + panic(fmt.Sprintf("received packet with unknown encryption level: %s", encLevel)) + } +} + +func (h *receivedPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { + //nolint:exhaustive // 1-RTT packet number space is never dropped. + switch encLevel { + case protocol.EncryptionInitial: + h.initialPackets = nil + case protocol.EncryptionHandshake: + h.handshakePackets = nil + case protocol.Encryption0RTT: + // Nothing to do here. + // If we are rejecting 0-RTT, no 0-RTT packets will have been decrypted. + default: + panic(fmt.Sprintf("Cannot drop keys for encryption level %s", encLevel)) + } +} + +func (h *receivedPacketHandler) GetAlarmTimeout() time.Time { + return h.appDataPackets.GetAlarmTimeout() +} + +func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame { + //nolint:exhaustive // 0-RTT packets can't contain ACK frames. + switch encLevel { + case protocol.EncryptionInitial: + if h.initialPackets != nil { + return h.initialPackets.GetAckFrame() + } + return nil + case protocol.EncryptionHandshake: + if h.handshakePackets != nil { + return h.handshakePackets.GetAckFrame() + } + return nil + case protocol.Encryption1RTT: + return h.appDataPackets.GetAckFrame(onlyIfQueued) + default: + // 0-RTT packets can't contain ACK frames + return nil + } +} + +func (h *receivedPacketHandler) IsPotentiallyDuplicate(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel) bool { + switch encLevel { + case protocol.EncryptionInitial: + if h.initialPackets != nil { + return h.initialPackets.IsPotentiallyDuplicate(pn) + } + case protocol.EncryptionHandshake: + if h.handshakePackets != nil { + return h.handshakePackets.IsPotentiallyDuplicate(pn) + } + case protocol.Encryption0RTT, protocol.Encryption1RTT: + return h.appDataPackets.IsPotentiallyDuplicate(pn) + } + panic("unexpected encryption level") +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go new file mode 100644 index 00000000..f9feae1d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go @@ -0,0 +1,141 @@ +package ackhandler + +import ( + "slices" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +// interval is an interval from one PacketNumber to the other +type interval struct { + Start protocol.PacketNumber + End protocol.PacketNumber +} + +// The receivedPacketHistory stores if a packet number has already been received. +// It generates ACK ranges which can be used to assemble an ACK frame. +// It does not store packet contents. 
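+// Illustrative worked example, not upstream source: receiving packets
+// 3, 5, 4, 4 in that order yields
+//
+//	ReceivedPacket(3) -> ranges [3,3]
+//	ReceivedPacket(5) -> ranges [3,3] [5,5]  (gap at 4)
+//	ReceivedPacket(4) -> ranges [3,5]        (4 extends [5,5], which merges with [3,3])
+//	ReceivedPacket(4) -> returns false       (duplicate; ranges unchanged)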
+type receivedPacketHistory struct { + ranges []interval // maximum length: protocol.MaxNumAckRanges + + deletedBelow protocol.PacketNumber +} + +func newReceivedPacketHistory() *receivedPacketHistory { + return &receivedPacketHistory{} +} + +// ReceivedPacket registers a packet with PacketNumber p and updates the ranges +func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) bool /* is a new packet (and not a duplicate / delayed packet) */ { + // ignore delayed packets, if we already deleted the range + if p < h.deletedBelow { + return false + } + + isNew := h.addToRanges(p) + // Delete old ranges, if we're tracking too many of them. + // This is a DoS defense against a peer that sends us too many gaps. + if len(h.ranges) > protocol.MaxNumAckRanges { + h.ranges = slices.Delete(h.ranges, 0, len(h.ranges)-protocol.MaxNumAckRanges) + } + return isNew +} + +func (h *receivedPacketHistory) addToRanges(p protocol.PacketNumber) bool /* is a new packet (and not a duplicate / delayed packet) */ { + if len(h.ranges) == 0 { + h.ranges = append(h.ranges, interval{Start: p, End: p}) + return true + } + + for i := len(h.ranges) - 1; i >= 0; i-- { + // p already included in an existing range. Nothing to do here + if p >= h.ranges[i].Start && p <= h.ranges[i].End { + return false + } + + if h.ranges[i].End == p-1 { // extend a range at the end + h.ranges[i].End = p + return true + } + if h.ranges[i].Start == p+1 { // extend a range at the beginning + h.ranges[i].Start = p + + if i > 0 && h.ranges[i-1].End+1 == h.ranges[i].Start { // merge two ranges + h.ranges[i-1].End = h.ranges[i].End + h.ranges = slices.Delete(h.ranges, i, i+1) + } + return true + } + + // create a new range after the current one + if p > h.ranges[i].End { + h.ranges = slices.Insert(h.ranges, i+1, interval{Start: p, End: p}) + return true + } + } + + // create a new range at the beginning + h.ranges = slices.Insert(h.ranges, 0, interval{Start: p, End: p}) + return true +} + +// DeleteBelow deletes all entries below (but not including) p +func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { + if p < h.deletedBelow { + return + } + h.deletedBelow = p + + if len(h.ranges) == 0 { + return + } + + idx := -1 + for i := 0; i < len(h.ranges); i++ { + if h.ranges[i].End < p { // delete a whole range + idx = i + } else if p > h.ranges[i].Start && p <= h.ranges[i].End { + h.ranges[i].Start = p + break + } else { // no ranges affected. Nothing to do + break + } + } + if idx >= 0 { + h.ranges = slices.Delete(h.ranges, 0, idx+1) + } +} + +// AppendAckRanges appends to a slice of all AckRanges that can be used in an AckFrame +func (h *receivedPacketHistory) AppendAckRanges(ackRanges []wire.AckRange) []wire.AckRange { + for i := len(h.ranges) - 1; i >= 0; i-- { + ackRanges = append(ackRanges, wire.AckRange{Smallest: h.ranges[i].Start, Largest: h.ranges[i].End}) + } + return ackRanges +} + +func (h *receivedPacketHistory) GetHighestAckRange() wire.AckRange { + ackRange := wire.AckRange{} + if len(h.ranges) > 0 { + ackRange.Smallest = h.ranges[len(h.ranges)-1].Start + ackRange.Largest = h.ranges[len(h.ranges)-1].End + } + return ackRange +} + +func (h *receivedPacketHistory) IsPotentiallyDuplicate(p protocol.PacketNumber) bool { + if p < h.deletedBelow { + return true + } + // Iterating over the slices is faster than using a binary search (using slices.BinarySearchFunc). 
+	for i := len(h.ranges) - 1; i >= 0; i-- {
+		if p > h.ranges[i].End {
+			return false
+		}
+		if p <= h.ranges[i].End && p >= h.ranges[i].Start {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go
new file mode 100644
index 00000000..08af6f1e
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go
@@ -0,0 +1,220 @@
+package ackhandler
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/utils"
+	"github.com/quic-go/quic-go/internal/wire"
+)
+
+// The receivedPacketTracker tracks packets for the Initial and Handshake packet number space.
+// Every received packet is acknowledged immediately.
+type receivedPacketTracker struct {
+	ect0, ect1, ecnce uint64
+
+	packetHistory receivedPacketHistory
+
+	lastAck   *wire.AckFrame
+	hasNewAck bool // true as soon as we received an ack-eliciting new packet
+}
+
+func newReceivedPacketTracker() *receivedPacketTracker {
+	return &receivedPacketTracker{packetHistory: *newReceivedPacketHistory()}
+}
+
+func (h *receivedPacketTracker) ReceivedPacket(pn protocol.PacketNumber, ecn protocol.ECN, rcvTime time.Time, ackEliciting bool) error {
+	if isNew := h.packetHistory.ReceivedPacket(pn); !isNew {
+		return fmt.Errorf("receivedPacketTracker BUG: ReceivedPacket called for old / duplicate packet %d", pn)
+	}
+
+	//nolint:exhaustive // Only need to count ECT(0), ECT(1) and ECN-CE.
+	switch ecn {
+	case protocol.ECT0:
+		h.ect0++
+	case protocol.ECT1:
+		h.ect1++
+	case protocol.ECNCE:
+		h.ecnce++
+	}
+	if !ackEliciting {
+		return nil
+	}
+	h.hasNewAck = true
+	return nil
+}
+
+func (h *receivedPacketTracker) GetAckFrame() *wire.AckFrame {
+	if !h.hasNewAck {
+		return nil
+	}
+
+	// This function always returns the same ACK frame struct, filled with the most recent values.
+	ack := h.lastAck
+	if ack == nil {
+		ack = &wire.AckFrame{}
+	}
+	ack.Reset()
+	ack.ECT0 = h.ect0
+	ack.ECT1 = h.ect1
+	ack.ECNCE = h.ecnce
+	ack.AckRanges = h.packetHistory.AppendAckRanges(ack.AckRanges)
+
+	h.lastAck = ack
+	h.hasNewAck = false
+	return ack
+}
+
+func (h *receivedPacketTracker) IsPotentiallyDuplicate(pn protocol.PacketNumber) bool {
+	return h.packetHistory.IsPotentiallyDuplicate(pn)
+}
+
+// number of ack-eliciting packets received before sending an ACK
+const packetsBeforeAck = 2
+
+// The appDataReceivedPacketTracker tracks packets received in the Application Data packet number space.
+// It waits until at least 2 packets have been received before queueing an ACK, or until the max_ack_delay has been reached.
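+// Illustrative summary, not upstream source: shouldQueueACK below queues an
+// immediate ACK when any of the following hold; otherwise the ack alarm fires
+// after max_ack_delay:
+//
+//	1. the first ack-eliciting packet of the connection arrives
+//	2. a packet previously reported missing arrives (reordering)
+//	3. every 2nd ack-eliciting packet (packetsBeforeAck)
+//	4. there is a new gap (missing packet) to report
+//	5. the packet is ECN-CE marked (explicit congestion signal)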
+type appDataReceivedPacketTracker struct {
+	receivedPacketTracker
+
+	largestObservedRcvdTime time.Time
+
+	largestObserved protocol.PacketNumber
+	ignoreBelow     protocol.PacketNumber
+
+	maxAckDelay time.Duration
+	ackQueued   bool // true if we need to send a new ACK
+
+	ackElicitingPacketsReceivedSinceLastAck int
+	ackAlarm                                time.Time
+
+	logger utils.Logger
+}
+
+func newAppDataReceivedPacketTracker(logger utils.Logger) *appDataReceivedPacketTracker {
+	h := &appDataReceivedPacketTracker{
+		receivedPacketTracker: *newReceivedPacketTracker(),
+		maxAckDelay:           protocol.MaxAckDelay,
+		logger:                logger,
+	}
+	return h
+}
+
+func (h *appDataReceivedPacketTracker) ReceivedPacket(pn protocol.PacketNumber, ecn protocol.ECN, rcvTime time.Time, ackEliciting bool) error {
+	if err := h.receivedPacketTracker.ReceivedPacket(pn, ecn, rcvTime, ackEliciting); err != nil {
+		return err
+	}
+	if pn >= h.largestObserved {
+		h.largestObserved = pn
+		h.largestObservedRcvdTime = rcvTime
+	}
+	if !ackEliciting {
+		return nil
+	}
+	h.ackElicitingPacketsReceivedSinceLastAck++
+	isMissing := h.isMissing(pn)
+	if !h.ackQueued && h.shouldQueueACK(pn, ecn, isMissing) {
+		h.ackQueued = true
+		h.ackAlarm = time.Time{} // cancel the ack alarm
+	}
+	if !h.ackQueued {
+		// No ACK queued, but we'll need to acknowledge the packet after max_ack_delay.
+		h.ackAlarm = rcvTime.Add(h.maxAckDelay)
+		if h.logger.Debug() {
+			h.logger.Debugf("\tSetting ACK timer to max ack delay: %s", h.maxAckDelay)
+		}
+	}
+	return nil
+}
+
+// IgnoreBelow sets a lower limit for acknowledging packets.
+// Packets with packet numbers smaller than pn will not be acked.
+func (h *appDataReceivedPacketTracker) IgnoreBelow(pn protocol.PacketNumber) {
+	if pn <= h.ignoreBelow {
+		return
+	}
+	h.ignoreBelow = pn
+	h.packetHistory.DeleteBelow(pn)
+	if h.logger.Debug() {
+		h.logger.Debugf("\tIgnoring all packets below %d.", pn)
+	}
+}
+
+// isMissing says if a packet was reported missing in the last ACK.
+func (h *appDataReceivedPacketTracker) isMissing(p protocol.PacketNumber) bool {
+	if h.lastAck == nil || p < h.ignoreBelow {
+		return false
+	}
+	return p < h.lastAck.LargestAcked() && !h.lastAck.AcksPacket(p)
+}
+
+func (h *appDataReceivedPacketTracker) hasNewMissingPackets() bool {
+	if h.lastAck == nil {
+		return false
+	}
+	highestRange := h.packetHistory.GetHighestAckRange()
+	return highestRange.Smallest > h.lastAck.LargestAcked()+1 && highestRange.Len() == 1
+}
+
+func (h *appDataReceivedPacketTracker) shouldQueueACK(pn protocol.PacketNumber, ecn protocol.ECN, wasMissing bool) bool {
+	// always acknowledge the first packet
+	if h.lastAck == nil {
+		h.logger.Debugf("\tQueueing ACK because the first packet should be acknowledged.")
+		return true
+	}
+
+	// Send an ACK if this packet was reported missing in an ACK sent before.
+	// Ack decimation with reordering relies on the timer to send an ACK, but if
+	// missing packets were reported in the previous ACK, send an ACK immediately.
+	if wasMissing {
+		if h.logger.Debug() {
+			h.logger.Debugf("\tQueueing ACK because packet %d was missing before.", pn)
+		}
+		return true
+	}
+
+	// send an ACK every 2 ack-eliciting packets
+	if h.ackElicitingPacketsReceivedSinceLastAck >= packetsBeforeAck {
+		if h.logger.Debug() {
+			h.logger.Debugf("\tQueueing ACK because %d packets were received after the last ACK (using initial threshold: %d).", h.ackElicitingPacketsReceivedSinceLastAck, packetsBeforeAck)
+		}
+		return true
+	}
+
+	// queue an ACK if there are new missing packets to report
+	if h.hasNewMissingPackets() {
+		h.logger.Debugf("\tQueueing ACK because there's a new missing packet to report.")
+		return true
+	}
+
+	// queue an ACK if the packet was ECN-CE marked
+	if ecn == protocol.ECNCE {
+		h.logger.Debugf("\tQueueing ACK because the packet was ECN-CE marked.")
+		return true
+	}
+	return false
+}
+
+func (h *appDataReceivedPacketTracker) GetAckFrame(onlyIfQueued bool) *wire.AckFrame {
+	now := time.Now()
+	if onlyIfQueued && !h.ackQueued {
+		if h.ackAlarm.IsZero() || h.ackAlarm.After(now) {
+			return nil
+		}
+		if h.logger.Debug() && !h.ackAlarm.IsZero() {
+			h.logger.Debugf("Sending ACK because the ACK timer expired.")
+		}
+	}
+	ack := h.receivedPacketTracker.GetAckFrame()
+	if ack == nil {
+		return nil
+	}
+	ack.DelayTime = max(0, now.Sub(h.largestObservedRcvdTime))
+	h.ackQueued = false
+	h.ackAlarm = time.Time{}
+	h.ackElicitingPacketsReceivedSinceLastAck = 0
+	return ack
+}
+
+func (h *appDataReceivedPacketTracker) GetAlarmTimeout() time.Time { return h.ackAlarm }
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go
new file mode 100644
index 00000000..c03f3a6f
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go
@@ -0,0 +1,46 @@
+package ackhandler
+
+import "fmt"
+
+// The SendMode says what kind of packets can be sent.
+type SendMode uint8
+
+const (
+	// SendNone means that no packets should be sent
+	SendNone SendMode = iota
+	// SendAck means an ACK-only packet should be sent
+	SendAck
+	// SendPTOInitial means that an Initial probe packet should be sent
+	SendPTOInitial
+	// SendPTOHandshake means that a Handshake probe packet should be sent
+	SendPTOHandshake
+	// SendPTOAppData means that an Application data probe packet should be sent
+	SendPTOAppData
+	// SendPacingLimited means that the pacer doesn't allow sending of a packet right now,
+	// but will allow it in a little while.
+	// The timestamp when sending is allowed again can be obtained via the SentPacketHandler.TimeUntilSend.
+ SendPacingLimited + // SendAny means that any packet should be sent + SendAny +) + +func (s SendMode) String() string { + switch s { + case SendNone: + return "none" + case SendAck: + return "ack" + case SendPTOInitial: + return "pto (Initial)" + case SendPTOHandshake: + return "pto (Handshake)" + case SendPTOAppData: + return "pto (Application Data)" + case SendAny: + return "any" + case SendPacingLimited: + return "pacing limited" + default: + return fmt.Sprintf("invalid send mode: %d", s) + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go new file mode 100644 index 00000000..b84f0dcb --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go @@ -0,0 +1,928 @@ +package ackhandler + +import ( + "errors" + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/congestion" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +const ( + // Maximum reordering in time space before time based loss detection considers a packet lost. + // Specified as an RTT multiplier. + timeThreshold = 9.0 / 8 + // Maximum reordering in packets before packet threshold loss detection considers a packet lost. + packetThreshold = 3 + // Before validating the client's address, the server won't send more than 3x bytes than it received. + amplificationFactor = 3 + // We use Retry packets to derive an RTT estimate. Make sure we don't set the RTT to a super low value yet. + minRTTAfterRetry = 5 * time.Millisecond + // The PTO duration uses exponential backoff, but is truncated to a maximum value, as allowed by RFC 8961, section 4.4. + maxPTODuration = 60 * time.Second +) + +type packetNumberSpace struct { + history sentPacketHistory + pns packetNumberGenerator + + lossTime time.Time + lastAckElicitingPacketTime time.Time + + largestAcked protocol.PacketNumber + largestSent protocol.PacketNumber +} + +func newPacketNumberSpace(initialPN protocol.PacketNumber, isAppData bool) *packetNumberSpace { + var pns packetNumberGenerator + if isAppData { + pns = newSkippingPacketNumberGenerator(initialPN, protocol.SkipPacketInitialPeriod, protocol.SkipPacketMaxPeriod) + } else { + pns = newSequentialPacketNumberGenerator(initialPN) + } + return &packetNumberSpace{ + history: *newSentPacketHistory(isAppData), + pns: pns, + largestSent: protocol.InvalidPacketNumber, + largestAcked: protocol.InvalidPacketNumber, + } +} + +type sentPacketHandler struct { + initialPackets *packetNumberSpace + handshakePackets *packetNumberSpace + appDataPackets *packetNumberSpace + + // Do we know that the peer completed address validation yet? + // Always true for the server. + peerCompletedAddressValidation bool + bytesReceived protocol.ByteCount + bytesSent protocol.ByteCount + // Have we validated the peer's address yet? + // Always true for the client. + peerAddressValidated bool + + handshakeConfirmed bool + + // lowestNotConfirmedAcked is the lowest packet number that we sent an ACK for, but haven't received confirmation, that this ACK actually arrived + // example: we send an ACK for packets 90-100 with packet number 20 + // once we receive an ACK from the peer for packet 20, the lowestNotConfirmedAcked is 101 + // Only applies to the application-data packet number space. 
+ lowestNotConfirmedAcked protocol.PacketNumber + + ackedPackets []*packet // to avoid allocations in detectAndRemoveAckedPackets + + bytesInFlight protocol.ByteCount + + congestion congestion.SendAlgorithmWithDebugInfos + rttStats *utils.RTTStats + + // The number of times a PTO has been sent without receiving an ack. + ptoCount uint32 + ptoMode SendMode + // The number of PTO probe packets that should be sent. + // Only applies to the application-data packet number space. + numProbesToSend int + + // The alarm timeout + alarm time.Time + + enableECN bool + ecnTracker ecnHandler + + perspective protocol.Perspective + + tracer *logging.ConnectionTracer + logger utils.Logger +} + +var ( + _ SentPacketHandler = &sentPacketHandler{} + _ sentPacketTracker = &sentPacketHandler{} +) + +// clientAddressValidated indicates whether the address was validated beforehand by an address validation token. +// If the address was validated, the amplification limit doesn't apply. It has no effect for a client. +func newSentPacketHandler( + initialPN protocol.PacketNumber, + initialMaxDatagramSize protocol.ByteCount, + rttStats *utils.RTTStats, + clientAddressValidated bool, + enableECN bool, + pers protocol.Perspective, + tracer *logging.ConnectionTracer, + logger utils.Logger, +) *sentPacketHandler { + congestion := congestion.NewCubicSender( + congestion.DefaultClock{}, + rttStats, + initialMaxDatagramSize, + true, // use Reno + tracer, + ) + + h := &sentPacketHandler{ + peerCompletedAddressValidation: pers == protocol.PerspectiveServer, + peerAddressValidated: pers == protocol.PerspectiveClient || clientAddressValidated, + initialPackets: newPacketNumberSpace(initialPN, false), + handshakePackets: newPacketNumberSpace(0, false), + appDataPackets: newPacketNumberSpace(0, true), + rttStats: rttStats, + congestion: congestion, + perspective: pers, + tracer: tracer, + logger: logger, + } + if enableECN { + h.enableECN = true + h.ecnTracker = newECNTracker(logger, tracer) + } + return h +} + +func (h *sentPacketHandler) removeFromBytesInFlight(p *packet) { + if p.includedInBytesInFlight { + if p.Length > h.bytesInFlight { + panic("negative bytes_in_flight") + } + h.bytesInFlight -= p.Length + p.includedInBytesInFlight = false + } +} + +func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { + // The server won't await address validation after the handshake is confirmed. + // This applies even if we didn't receive an ACK for a Handshake packet. + if h.perspective == protocol.PerspectiveClient && encLevel == protocol.EncryptionHandshake { + h.peerCompletedAddressValidation = true + } + // remove outstanding packets from bytes_in_flight + if encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake { + pnSpace := h.getPacketNumberSpace(encLevel) + // We might already have dropped this packet number space. + if pnSpace == nil { + return + } + pnSpace.history.Iterate(func(p *packet) (bool, error) { + h.removeFromBytesInFlight(p) + return true, nil + }) + } + // drop the packet history + //nolint:exhaustive // Not every packet number space can be dropped. + switch encLevel { + case protocol.EncryptionInitial: + h.initialPackets = nil + case protocol.EncryptionHandshake: + h.handshakePackets = nil + case protocol.Encryption0RTT: + // This function is only called when 0-RTT is rejected, + // and not when the client drops 0-RTT keys when the handshake completes. + // When 0-RTT is rejected, all application data sent so far becomes invalid. 
+ // Delete the packets from the history and remove them from bytes_in_flight. + h.appDataPackets.history.Iterate(func(p *packet) (bool, error) { + if p.EncryptionLevel != protocol.Encryption0RTT && !p.skippedPacket { + return false, nil + } + h.removeFromBytesInFlight(p) + h.appDataPackets.history.Remove(p.PacketNumber) + return true, nil + }) + default: + panic(fmt.Sprintf("Cannot drop keys for encryption level %s", encLevel)) + } + if h.tracer != nil && h.tracer.UpdatedPTOCount != nil && h.ptoCount != 0 { + h.tracer.UpdatedPTOCount(0) + } + h.ptoCount = 0 + h.numProbesToSend = 0 + h.ptoMode = SendNone + h.setLossDetectionTimer() +} + +func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount) { + wasAmplificationLimit := h.isAmplificationLimited() + h.bytesReceived += n + if wasAmplificationLimit && !h.isAmplificationLimited() { + h.setLossDetectionTimer() + } +} + +func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel) { + if h.perspective == protocol.PerspectiveServer && l == protocol.EncryptionHandshake && !h.peerAddressValidated { + h.peerAddressValidated = true + h.setLossDetectionTimer() + } +} + +func (h *sentPacketHandler) packetsInFlight() int { + packetsInFlight := h.appDataPackets.history.Len() + if h.handshakePackets != nil { + packetsInFlight += h.handshakePackets.history.Len() + } + if h.initialPackets != nil { + packetsInFlight += h.initialPackets.history.Len() + } + return packetsInFlight +} + +func (h *sentPacketHandler) SentPacket( + t time.Time, + pn, largestAcked protocol.PacketNumber, + streamFrames []StreamFrame, + frames []Frame, + encLevel protocol.EncryptionLevel, + ecn protocol.ECN, + size protocol.ByteCount, + isPathMTUProbePacket bool, +) { + h.bytesSent += size + + pnSpace := h.getPacketNumberSpace(encLevel) + if h.logger.Debug() && pnSpace.history.HasOutstandingPackets() { + for p := max(0, pnSpace.largestSent+1); p < pn; p++ { + h.logger.Debugf("Skipping packet number %d", p) + } + } + + pnSpace.largestSent = pn + isAckEliciting := len(streamFrames) > 0 || len(frames) > 0 + + if isAckEliciting { + pnSpace.lastAckElicitingPacketTime = t + h.bytesInFlight += size + if h.numProbesToSend > 0 { + h.numProbesToSend-- + } + } + h.congestion.OnPacketSent(t, h.bytesInFlight, pn, size, isAckEliciting) + + if encLevel == protocol.Encryption1RTT && h.ecnTracker != nil { + h.ecnTracker.SentPacket(pn, ecn) + } + + if !isAckEliciting { + pnSpace.history.SentNonAckElicitingPacket(pn) + if !h.peerCompletedAddressValidation { + h.setLossDetectionTimer() + } + return + } + + p := getPacket() + p.SendTime = t + p.PacketNumber = pn + p.EncryptionLevel = encLevel + p.Length = size + p.LargestAcked = largestAcked + p.StreamFrames = streamFrames + p.Frames = frames + p.IsPathMTUProbePacket = isPathMTUProbePacket + p.includedInBytesInFlight = true + + pnSpace.history.SentAckElicitingPacket(p) + if h.tracer != nil && h.tracer.UpdatedMetrics != nil { + h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) + } + h.setLossDetectionTimer() +} + +func (h *sentPacketHandler) getPacketNumberSpace(encLevel protocol.EncryptionLevel) *packetNumberSpace { + switch encLevel { + case protocol.EncryptionInitial: + return h.initialPackets + case protocol.EncryptionHandshake: + return h.handshakePackets + case protocol.Encryption0RTT, protocol.Encryption1RTT: + return h.appDataPackets + default: + panic("invalid packet number space") + } +} + +func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel 
protocol.EncryptionLevel, rcvTime time.Time) (bool /* contained 1-RTT packet */, error) { + pnSpace := h.getPacketNumberSpace(encLevel) + + largestAcked := ack.LargestAcked() + if largestAcked > pnSpace.largestSent { + return false, &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "received ACK for an unsent packet", + } + } + + // Servers complete address validation when a protected packet is received. + if h.perspective == protocol.PerspectiveClient && !h.peerCompletedAddressValidation && + (encLevel == protocol.EncryptionHandshake || encLevel == protocol.Encryption1RTT) { + h.peerCompletedAddressValidation = true + h.logger.Debugf("Peer doesn't await address validation any longer.") + // Make sure that the timer is reset, even if this ACK doesn't acknowledge any (ack-eliciting) packets. + h.setLossDetectionTimer() + } + + priorInFlight := h.bytesInFlight + ackedPackets, err := h.detectAndRemoveAckedPackets(ack, encLevel) + if err != nil || len(ackedPackets) == 0 { + return false, err + } + // update the RTT, if the largest acked is newly acknowledged + if len(ackedPackets) > 0 { + if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() { + // don't use the ack delay for Initial and Handshake packets + var ackDelay time.Duration + if encLevel == protocol.Encryption1RTT { + ackDelay = min(ack.DelayTime, h.rttStats.MaxAckDelay()) + } + h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay, rcvTime) + if h.logger.Debug() { + h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) + } + h.congestion.MaybeExitSlowStart() + } + } + + // Only inform the ECN tracker about new 1-RTT ACKs if the ACK increases the largest acked. + if encLevel == protocol.Encryption1RTT && h.ecnTracker != nil && largestAcked > pnSpace.largestAcked { + congested := h.ecnTracker.HandleNewlyAcked(ackedPackets, int64(ack.ECT0), int64(ack.ECT1), int64(ack.ECNCE)) + if congested { + h.congestion.OnCongestionEvent(largestAcked, 0, priorInFlight) + } + } + + pnSpace.largestAcked = max(pnSpace.largestAcked, largestAcked) + + if err := h.detectLostPackets(rcvTime, encLevel); err != nil { + return false, err + } + var acked1RTTPacket bool + for _, p := range ackedPackets { + if p.includedInBytesInFlight && !p.declaredLost { + h.congestion.OnPacketAcked(p.PacketNumber, p.Length, priorInFlight, rcvTime) + } + if p.EncryptionLevel == protocol.Encryption1RTT { + acked1RTTPacket = true + } + h.removeFromBytesInFlight(p) + putPacket(p) + } + // After this point, we must not use ackedPackets any longer! + // We've already returned the buffers. + ackedPackets = nil //nolint:ineffassign // This is just to be on the safe side. + + // Reset the pto_count unless the client is unsure if the server has validated the client's address. + if h.peerCompletedAddressValidation { + if h.tracer != nil && h.tracer.UpdatedPTOCount != nil && h.ptoCount != 0 { + h.tracer.UpdatedPTOCount(0) + } + h.ptoCount = 0 + } + h.numProbesToSend = 0 + + if h.tracer != nil && h.tracer.UpdatedMetrics != nil { + h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) + } + + h.setLossDetectionTimer() + return acked1RTTPacket, nil +} + +func (h *sentPacketHandler) GetLowestPacketNotConfirmedAcked() protocol.PacketNumber { + return h.lowestNotConfirmedAcked +} + +// Packets are returned in ascending packet number order. 
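+// Illustrative worked example, not upstream source: tracked packets 1..7, and
+// the ACK carries ranges [1,2] and [5,7] (3 and 4 missing):
+//
+//	packets 1,2  -> inside [1,2]          -> newly acked
+//	packets 3,4  -> below [5,7].Smallest  -> kept; may be declared lost later
+//	packets 5..7 -> inside [5,7]          -> newly acked
+//
+// Acknowledging a deliberately skipped packet number instead fails with a
+// PROTOCOL_VIOLATION.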
+func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encLevel protocol.EncryptionLevel) ([]*packet, error) { + pnSpace := h.getPacketNumberSpace(encLevel) + h.ackedPackets = h.ackedPackets[:0] + ackRangeIndex := 0 + lowestAcked := ack.LowestAcked() + largestAcked := ack.LargestAcked() + err := pnSpace.history.Iterate(func(p *packet) (bool, error) { + // Ignore packets below the lowest acked + if p.PacketNumber < lowestAcked { + return true, nil + } + // Break after largest acked is reached + if p.PacketNumber > largestAcked { + return false, nil + } + + if ack.HasMissingRanges() { + ackRange := ack.AckRanges[len(ack.AckRanges)-1-ackRangeIndex] + + for p.PacketNumber > ackRange.Largest && ackRangeIndex < len(ack.AckRanges)-1 { + ackRangeIndex++ + ackRange = ack.AckRanges[len(ack.AckRanges)-1-ackRangeIndex] + } + + if p.PacketNumber < ackRange.Smallest { // packet not contained in ACK range + return true, nil + } + if p.PacketNumber > ackRange.Largest { + return false, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", p.PacketNumber, ackRange.Smallest, ackRange.Largest) + } + } + if p.skippedPacket { + return false, &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: fmt.Sprintf("received an ACK for skipped packet number: %d (%s)", p.PacketNumber, encLevel), + } + } + h.ackedPackets = append(h.ackedPackets, p) + return true, nil + }) + if h.logger.Debug() && len(h.ackedPackets) > 0 { + pns := make([]protocol.PacketNumber, len(h.ackedPackets)) + for i, p := range h.ackedPackets { + pns[i] = p.PacketNumber + } + h.logger.Debugf("\tnewly acked packets (%d): %d", len(pns), pns) + } + + for _, p := range h.ackedPackets { + if p.LargestAcked != protocol.InvalidPacketNumber && encLevel == protocol.Encryption1RTT { + h.lowestNotConfirmedAcked = max(h.lowestNotConfirmedAcked, p.LargestAcked+1) + } + + for _, f := range p.Frames { + if f.Handler != nil { + f.Handler.OnAcked(f.Frame) + } + } + for _, f := range p.StreamFrames { + if f.Handler != nil { + f.Handler.OnAcked(f.Frame) + } + } + if err := pnSpace.history.Remove(p.PacketNumber); err != nil { + return nil, err + } + if h.tracer != nil && h.tracer.AcknowledgedPacket != nil { + h.tracer.AcknowledgedPacket(encLevel, p.PacketNumber) + } + } + + return h.ackedPackets, err +} + +func (h *sentPacketHandler) getLossTimeAndSpace() (time.Time, protocol.EncryptionLevel) { + var encLevel protocol.EncryptionLevel + var lossTime time.Time + + if h.initialPackets != nil { + lossTime = h.initialPackets.lossTime + encLevel = protocol.EncryptionInitial + } + if h.handshakePackets != nil && (lossTime.IsZero() || (!h.handshakePackets.lossTime.IsZero() && h.handshakePackets.lossTime.Before(lossTime))) { + lossTime = h.handshakePackets.lossTime + encLevel = protocol.EncryptionHandshake + } + if lossTime.IsZero() || (!h.appDataPackets.lossTime.IsZero() && h.appDataPackets.lossTime.Before(lossTime)) { + lossTime = h.appDataPackets.lossTime + encLevel = protocol.Encryption1RTT + } + return lossTime, encLevel +} + +func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration { + pto := h.rttStats.PTO(includeMaxAckDelay) << h.ptoCount + if pto > maxPTODuration || pto <= 0 { + return maxPTODuration + } + return pto +} + +// same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime +func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) { + // We only send application data 
probe packets once the handshake is confirmed, + // because before that, we don't have the keys to decrypt ACKs sent in 1-RTT packets. + if !h.handshakeConfirmed && !h.hasOutstandingCryptoPackets() { + if h.peerCompletedAddressValidation { + return + } + t := time.Now().Add(h.getScaledPTO(false)) + if h.initialPackets != nil { + return t, protocol.EncryptionInitial, true + } + return t, protocol.EncryptionHandshake, true + } + + if h.initialPackets != nil { + encLevel = protocol.EncryptionInitial + if t := h.initialPackets.lastAckElicitingPacketTime; !t.IsZero() { + pto = t.Add(h.getScaledPTO(false)) + } + } + if h.handshakePackets != nil && !h.handshakePackets.lastAckElicitingPacketTime.IsZero() { + t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(false)) + if pto.IsZero() || (!t.IsZero() && t.Before(pto)) { + pto = t + encLevel = protocol.EncryptionHandshake + } + } + if h.handshakeConfirmed && !h.appDataPackets.lastAckElicitingPacketTime.IsZero() { + t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(true)) + if pto.IsZero() || (!t.IsZero() && t.Before(pto)) { + pto = t + encLevel = protocol.Encryption1RTT + } + } + return pto, encLevel, true +} + +func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool { + if h.initialPackets != nil && h.initialPackets.history.HasOutstandingPackets() { + return true + } + if h.handshakePackets != nil && h.handshakePackets.history.HasOutstandingPackets() { + return true + } + return false +} + +func (h *sentPacketHandler) hasOutstandingPackets() bool { + return h.appDataPackets.history.HasOutstandingPackets() || h.hasOutstandingCryptoPackets() +} + +func (h *sentPacketHandler) setLossDetectionTimer() { + oldAlarm := h.alarm // only needed in case tracing is enabled + lossTime, encLevel := h.getLossTimeAndSpace() + if !lossTime.IsZero() { + // Early retransmit timer or time loss detection. + h.alarm = lossTime + if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm { + h.tracer.SetLossTimer(logging.TimerTypeACK, encLevel, h.alarm) + } + return + } + + // Cancel the alarm if amplification limited. + if h.isAmplificationLimited() { + h.alarm = time.Time{} + if !oldAlarm.IsZero() { + h.logger.Debugf("Canceling loss detection timer. Amplification limited.") + if h.tracer != nil && h.tracer.LossTimerCanceled != nil { + h.tracer.LossTimerCanceled() + } + } + return + } + + // Cancel the alarm if no packets are outstanding + if !h.hasOutstandingPackets() && h.peerCompletedAddressValidation { + h.alarm = time.Time{} + if !oldAlarm.IsZero() { + h.logger.Debugf("Canceling loss detection timer. No packets in flight.") + if h.tracer != nil && h.tracer.LossTimerCanceled != nil { + h.tracer.LossTimerCanceled() + } + } + return + } + + // PTO alarm + ptoTime, encLevel, ok := h.getPTOTimeAndSpace() + if !ok { + if !oldAlarm.IsZero() { + h.alarm = time.Time{} + h.logger.Debugf("Canceling loss detection timer. 
No PTO needed.") + if h.tracer != nil && h.tracer.LossTimerCanceled != nil { + h.tracer.LossTimerCanceled() + } + } + return + } + h.alarm = ptoTime + if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm { + h.tracer.SetLossTimer(logging.TimerTypePTO, encLevel, h.alarm) + } +} + +func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.EncryptionLevel) error { + pnSpace := h.getPacketNumberSpace(encLevel) + pnSpace.lossTime = time.Time{} + + maxRTT := float64(max(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT())) + lossDelay := time.Duration(timeThreshold * maxRTT) + + // Minimum time of granularity before packets are deemed lost. + lossDelay = max(lossDelay, protocol.TimerGranularity) + + // Packets sent before this time are deemed lost. + lostSendTime := now.Add(-lossDelay) + + priorInFlight := h.bytesInFlight + return pnSpace.history.Iterate(func(p *packet) (bool, error) { + if p.PacketNumber > pnSpace.largestAcked { + return false, nil + } + + var packetLost bool + if p.SendTime.Before(lostSendTime) { + packetLost = true + if !p.skippedPacket { + if h.logger.Debug() { + h.logger.Debugf("\tlost packet %d (time threshold)", p.PacketNumber) + } + if h.tracer != nil && h.tracer.LostPacket != nil { + h.tracer.LostPacket(p.EncryptionLevel, p.PacketNumber, logging.PacketLossTimeThreshold) + } + } + } else if pnSpace.largestAcked >= p.PacketNumber+packetThreshold { + packetLost = true + if !p.skippedPacket { + if h.logger.Debug() { + h.logger.Debugf("\tlost packet %d (reordering threshold)", p.PacketNumber) + } + if h.tracer != nil && h.tracer.LostPacket != nil { + h.tracer.LostPacket(p.EncryptionLevel, p.PacketNumber, logging.PacketLossReorderingThreshold) + } + } + } else if pnSpace.lossTime.IsZero() { + // Note: This conditional is only entered once per call + lossTime := p.SendTime.Add(lossDelay) + if h.logger.Debug() { + h.logger.Debugf("\tsetting loss timer for packet %d (%s) to %s (in %s)", p.PacketNumber, encLevel, lossDelay, lossTime) + } + pnSpace.lossTime = lossTime + } + if packetLost { + pnSpace.history.DeclareLost(p.PacketNumber) + if !p.skippedPacket { + // the bytes in flight need to be reduced, regardless of whether the frames in this packet will be retransmitted + h.removeFromBytesInFlight(p) + h.queueFramesForRetransmission(p) + if !p.IsPathMTUProbePacket { + h.congestion.OnCongestionEvent(p.PacketNumber, p.Length, priorInFlight) + } + if encLevel == protocol.Encryption1RTT && h.ecnTracker != nil { + h.ecnTracker.LostPacket(p.PacketNumber) + } + } + } + return true, nil + }) +} + +func (h *sentPacketHandler) OnLossDetectionTimeout() error { + defer h.setLossDetectionTimer() + earliestLossTime, encLevel := h.getLossTimeAndSpace() + if !earliestLossTime.IsZero() { + if h.logger.Debug() { + h.logger.Debugf("Loss detection alarm fired in loss timer mode. Loss time: %s", earliestLossTime) + } + if h.tracer != nil && h.tracer.LossTimerExpired != nil { + h.tracer.LossTimerExpired(logging.TimerTypeACK, encLevel) + } + // Early retransmit or time loss detection + return h.detectLostPackets(time.Now(), encLevel) + } + + // PTO + // When all outstanding packets are acknowledged, the alarm is canceled in + // setLossDetectionTimer. This doesn't reset the timer in the session though. + // When OnLossDetectionTimeout is called, we therefore need to make sure that there are + // actually packets outstanding.
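+ // (Illustrative arithmetic: getScaledPTO left-shifts the PTO by ptoCount, so with + // a base PTO of e.g. 200ms, successive timeouts fire after 200ms, 400ms, 800ms, ..., + // capped at maxPTODuration.)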
+ if h.bytesInFlight == 0 && !h.peerCompletedAddressValidation { + h.ptoCount++ + h.numProbesToSend++ + if h.initialPackets != nil { + h.ptoMode = SendPTOInitial + } else if h.handshakePackets != nil { + h.ptoMode = SendPTOHandshake + } else { + return errors.New("sentPacketHandler BUG: PTO fired, but bytes_in_flight is 0 and Initial and Handshake already dropped") + } + return nil + } + + _, encLevel, ok := h.getPTOTimeAndSpace() + if !ok { + return nil + } + if ps := h.getPacketNumberSpace(encLevel); !ps.history.HasOutstandingPackets() && !h.peerCompletedAddressValidation { + return nil + } + h.ptoCount++ + if h.logger.Debug() { + h.logger.Debugf("Loss detection alarm for %s fired in PTO mode. PTO count: %d", encLevel, h.ptoCount) + } + if h.tracer != nil { + if h.tracer.LossTimerExpired != nil { + h.tracer.LossTimerExpired(logging.TimerTypePTO, encLevel) + } + if h.tracer.UpdatedPTOCount != nil { + h.tracer.UpdatedPTOCount(h.ptoCount) + } + } + h.numProbesToSend += 2 + //nolint:exhaustive // We never arm a PTO timer for 0-RTT packets. + switch encLevel { + case protocol.EncryptionInitial: + h.ptoMode = SendPTOInitial + case protocol.EncryptionHandshake: + h.ptoMode = SendPTOHandshake + case protocol.Encryption1RTT: + // skip a packet number in order to elicit an immediate ACK + pn := h.PopPacketNumber(protocol.Encryption1RTT) + h.getPacketNumberSpace(protocol.Encryption1RTT).history.SkippedPacket(pn) + h.ptoMode = SendPTOAppData + default: + return fmt.Errorf("PTO timer in unexpected encryption level: %s", encLevel) + } + return nil +} + +func (h *sentPacketHandler) GetLossDetectionTimeout() time.Time { + return h.alarm +} + +func (h *sentPacketHandler) ECNMode(isShortHeaderPacket bool) protocol.ECN { + if !h.enableECN { + return protocol.ECNUnsupported + } + if !isShortHeaderPacket { + return protocol.ECNNon + } + return h.ecnTracker.Mode() +} + +func (h *sentPacketHandler) PeekPacketNumber(encLevel protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) { + pnSpace := h.getPacketNumberSpace(encLevel) + pn := pnSpace.pns.Peek() + // See section 17.1 of RFC 9000. + return pn, protocol.PacketNumberLengthForHeader(pn, pnSpace.largestAcked) +} + +func (h *sentPacketHandler) PopPacketNumber(encLevel protocol.EncryptionLevel) protocol.PacketNumber { + pnSpace := h.getPacketNumberSpace(encLevel) + skipped, pn := pnSpace.pns.Pop() + if skipped { + skippedPN := pn - 1 + pnSpace.history.SkippedPacket(skippedPN) + if h.logger.Debug() { + h.logger.Debugf("Skipping packet number %d", skippedPN) + } + } + return pn +} + +func (h *sentPacketHandler) SendMode(now time.Time) SendMode { + numTrackedPackets := h.appDataPackets.history.Len() + if h.initialPackets != nil { + numTrackedPackets += h.initialPackets.history.Len() + } + if h.handshakePackets != nil { + numTrackedPackets += h.handshakePackets.history.Len() + } + + if h.isAmplificationLimited() { + h.logger.Debugf("Amplification window limited. Received %d bytes, already sent out %d bytes", h.bytesReceived, h.bytesSent) + return SendNone + } + // Don't send any packets if we're keeping track of the maximum number of packets. + // Note that since MaxOutstandingSentPackets is smaller than MaxTrackedSentPackets, + // we will stop sending out new data when reaching MaxOutstandingSentPackets, + // but still allow sending of retransmissions and ACKs. 
+ if numTrackedPackets >= protocol.MaxTrackedSentPackets { + if h.logger.Debug() { + h.logger.Debugf("Limited by the number of tracked packets: tracking %d packets, maximum %d", numTrackedPackets, protocol.MaxTrackedSentPackets) + } + return SendNone + } + if h.numProbesToSend > 0 { + return h.ptoMode + } + // Only send ACKs if we're congestion limited. + if !h.congestion.CanSend(h.bytesInFlight) { + if h.logger.Debug() { + h.logger.Debugf("Congestion limited: bytes in flight %d, window %d", h.bytesInFlight, h.congestion.GetCongestionWindow()) + } + return SendAck + } + if numTrackedPackets >= protocol.MaxOutstandingSentPackets { + if h.logger.Debug() { + h.logger.Debugf("Max outstanding limited: tracking %d packets, maximum: %d", numTrackedPackets, protocol.MaxOutstandingSentPackets) + } + return SendAck + } + if !h.congestion.HasPacingBudget(now) { + return SendPacingLimited + } + return SendAny +} + +func (h *sentPacketHandler) TimeUntilSend() time.Time { + return h.congestion.TimeUntilSend(h.bytesInFlight) +} + +func (h *sentPacketHandler) SetMaxDatagramSize(s protocol.ByteCount) { + h.congestion.SetMaxDatagramSize(s) +} + +func (h *sentPacketHandler) isAmplificationLimited() bool { + if h.peerAddressValidated { + return false + } + return h.bytesSent >= amplificationFactor*h.bytesReceived +} + +func (h *sentPacketHandler) QueueProbePacket(encLevel protocol.EncryptionLevel) bool { + pnSpace := h.getPacketNumberSpace(encLevel) + p := pnSpace.history.FirstOutstanding() + if p == nil { + return false + } + h.queueFramesForRetransmission(p) + // TODO: don't declare the packet lost here. + // Keep track of acknowledged frames instead. + h.removeFromBytesInFlight(p) + pnSpace.history.DeclareLost(p.PacketNumber) + return true +} + +func (h *sentPacketHandler) queueFramesForRetransmission(p *packet) { + if len(p.Frames) == 0 && len(p.StreamFrames) == 0 { + panic("no frames") + } + for _, f := range p.Frames { + if f.Handler != nil { + f.Handler.OnLost(f.Frame) + } + } + for _, f := range p.StreamFrames { + if f.Handler != nil { + f.Handler.OnLost(f.Frame) + } + } + p.StreamFrames = nil + p.Frames = nil +} + +func (h *sentPacketHandler) ResetForRetry(now time.Time) error { + h.bytesInFlight = 0 + var firstPacketSendTime time.Time + h.initialPackets.history.Iterate(func(p *packet) (bool, error) { + if firstPacketSendTime.IsZero() { + firstPacketSendTime = p.SendTime + } + if p.declaredLost || p.skippedPacket { + return true, nil + } + h.queueFramesForRetransmission(p) + return true, nil + }) + // All application data packets sent at this point are 0-RTT packets. + // In the case of a Retry, we can assume that the server dropped all of them. + h.appDataPackets.history.Iterate(func(p *packet) (bool, error) { + if !p.declaredLost && !p.skippedPacket { + h.queueFramesForRetransmission(p) + } + return true, nil + }) + + // Only use the Retry to estimate the RTT if we didn't send any retransmission for the Initial. + // Otherwise, we don't know which Initial the Retry was sent in response to. + if h.ptoCount == 0 { + // Don't set the RTT to a value lower than 5ms here. 
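+ // (For example, a Retry arriving 2ms after the first Initial was sent still yields + // a 5ms sample: minRTTAfterRetry acts as a floor inside the max below.)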
+ h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now) + if h.logger.Debug() { + h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) + } + if h.tracer != nil && h.tracer.UpdatedMetrics != nil { + h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) + } + } + h.initialPackets = newPacketNumberSpace(h.initialPackets.pns.Peek(), false) + h.appDataPackets = newPacketNumberSpace(h.appDataPackets.pns.Peek(), true) + oldAlarm := h.alarm + h.alarm = time.Time{} + if h.tracer != nil { + if h.tracer.UpdatedPTOCount != nil { + h.tracer.UpdatedPTOCount(0) + } + if !oldAlarm.IsZero() && h.tracer.LossTimerCanceled != nil { + h.tracer.LossTimerCanceled() + } + } + h.ptoCount = 0 + return nil +} + +func (h *sentPacketHandler) SetHandshakeConfirmed() { + if h.initialPackets != nil { + panic("didn't drop initial correctly") + } + if h.handshakePackets != nil { + panic("didn't drop handshake correctly") + } + h.handshakeConfirmed = true + // We don't send PTOs for application data packets before the handshake completes. + // Make sure the timer is armed now, if necessary. + h.setLossDetectionTimer() +} diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go new file mode 100644 index 00000000..9968df6a --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go @@ -0,0 +1,182 @@ +package ackhandler + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" +) + +type sentPacketHistory struct { + packets []*packet + + numOutstanding int + + highestPacketNumber protocol.PacketNumber +} + +func newSentPacketHistory(isAppData bool) *sentPacketHistory { + h := &sentPacketHistory{ + highestPacketNumber: protocol.InvalidPacketNumber, + } + if isAppData { + h.packets = make([]*packet, 0, 32) + } else { + h.packets = make([]*packet, 0, 6) + } + return h +} + +func (h *sentPacketHistory) checkSequentialPacketNumberUse(pn protocol.PacketNumber) { + if h.highestPacketNumber != protocol.InvalidPacketNumber { + if pn != h.highestPacketNumber+1 { + panic("non-sequential packet number use") + } + } +} + +func (h *sentPacketHistory) SkippedPacket(pn protocol.PacketNumber) { + h.checkSequentialPacketNumberUse(pn) + h.highestPacketNumber = pn + h.packets = append(h.packets, &packet{ + PacketNumber: pn, + skippedPacket: true, + }) +} + +func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber) { + h.checkSequentialPacketNumberUse(pn) + h.highestPacketNumber = pn + if len(h.packets) > 0 { + h.packets = append(h.packets, nil) + } +} + +func (h *sentPacketHistory) SentAckElicitingPacket(p *packet) { + h.checkSequentialPacketNumberUse(p.PacketNumber) + h.highestPacketNumber = p.PacketNumber + h.packets = append(h.packets, p) + if p.outstanding() { + h.numOutstanding++ + } +} + +// Iterate iterates through all packets. +func (h *sentPacketHistory) Iterate(cb func(*packet) (cont bool, err error)) error { + for _, p := range h.packets { + if p == nil { + continue + } + cont, err := cb(p) + if err != nil { + return err + } + if !cont { + return nil + } + } + return nil +} + +// FirstOutstanding returns the first outstanding packet. 
+func (h *sentPacketHistory) FirstOutstanding() *packet { + if !h.HasOutstandingPackets() { + return nil + } + for _, p := range h.packets { + if p != nil && p.outstanding() { + return p + } + } + return nil +} + +func (h *sentPacketHistory) Len() int { + return len(h.packets) +} + +func (h *sentPacketHistory) Remove(pn protocol.PacketNumber) error { + idx, ok := h.getIndex(pn) + if !ok { + return fmt.Errorf("packet %d not found in sent packet history", pn) + } + p := h.packets[idx] + if p.outstanding() { + h.numOutstanding-- + if h.numOutstanding < 0 { + panic("negative number of outstanding packets") + } + } + h.packets[idx] = nil + // clean up all skipped packets directly before this packet number + for idx > 0 { + idx-- + p := h.packets[idx] + if p == nil || !p.skippedPacket { + break + } + h.packets[idx] = nil + } + if idx == 0 { + h.cleanupStart() + } + if len(h.packets) > 0 && h.packets[0] == nil { + panic("remove failed") + } + return nil +} + +// getIndex gets the index of packet p in the packets slice. +func (h *sentPacketHistory) getIndex(p protocol.PacketNumber) (int, bool) { + if len(h.packets) == 0 { + return 0, false + } + first := h.packets[0].PacketNumber + if p < first { + return 0, false + } + index := int(p - first) + if index > len(h.packets)-1 { + return 0, false + } + return index, true +} + +func (h *sentPacketHistory) HasOutstandingPackets() bool { + return h.numOutstanding > 0 +} + +// delete all nil entries at the beginning of the packets slice +func (h *sentPacketHistory) cleanupStart() { + for i, p := range h.packets { + if p != nil { + h.packets = h.packets[i:] + return + } + } + h.packets = h.packets[:0] +} + +func (h *sentPacketHistory) LowestPacketNumber() protocol.PacketNumber { + if len(h.packets) == 0 { + return protocol.InvalidPacketNumber + } + return h.packets[0].PacketNumber +} + +func (h *sentPacketHistory) DeclareLost(pn protocol.PacketNumber) { + idx, ok := h.getIndex(pn) + if !ok { + return + } + p := h.packets[idx] + if p.outstanding() { + h.numOutstanding-- + if h.numOutstanding < 0 { + panic("negative number of outstanding packets") + } + } + h.packets[idx] = nil + if idx == 0 { + h.cleanupStart() + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go b/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go new file mode 100644 index 00000000..1d03abbb --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go @@ -0,0 +1,25 @@ +package congestion + +import ( + "math" + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// Bandwidth of a connection +type Bandwidth uint64 + +const infBandwidth Bandwidth = math.MaxUint64 + +const ( + // BitsPerSecond is 1 bit per second + BitsPerSecond Bandwidth = 1 + // BytesPerSecond is 1 byte per second + BytesPerSecond = 8 * BitsPerSecond +) + +// BandwidthFromDelta calculates the bandwidth from a number of bytes and a time delta +func BandwidthFromDelta(bytes protocol.ByteCount, delta time.Duration) Bandwidth { + return Bandwidth(bytes) * Bandwidth(time.Second) / Bandwidth(delta) * BytesPerSecond +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/clock.go b/vendor/github.com/quic-go/quic-go/internal/congestion/clock.go new file mode 100644 index 00000000..405fae70 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/clock.go @@ -0,0 +1,18 @@ +package congestion + +import "time" + +// A Clock returns the current time +type Clock interface { + Now() time.Time +} + +// DefaultClock 
implements the Clock interface using the Go stdlib clock. +type DefaultClock struct{} + +var _ Clock = DefaultClock{} + +// Now gets the current time +func (DefaultClock) Now() time.Time { + return time.Now() +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go new file mode 100644 index 00000000..b35d40d4 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go @@ -0,0 +1,213 @@ +package congestion + +import ( + "math" + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// This cubic implementation is based on the one found in Chromium's QUIC +// implementation, in the files net/quic/congestion_control/cubic.{hh,cc}. + +// Constants based on TCP defaults. +// The following constants are in 2^10 fractions of a second instead of ms to +// allow a 10 shift right to divide. + +// 1024*1024^3 (first 1024 is from 0.100^3) +// where 0.100 is 100 ms which is the scaling round trip time. +const ( + cubeScale = 40 + cubeCongestionWindowScale = 410 + cubeFactor = 1 << cubeScale / cubeCongestionWindowScale / maxDatagramSize + // TODO: when re-enabling cubic, make sure to use the actual packet size here + maxDatagramSize = protocol.ByteCount(protocol.InitialPacketSize) +) + +const defaultNumConnections = 1 + +// Default Cubic backoff factor +const beta float32 = 0.7 + +// Additional backoff factor when loss occurs in the concave part of the Cubic +// curve. This additional backoff factor is expected to give up bandwidth to +// new concurrent flows and speed up convergence. +const betaLastMax float32 = 0.85 + +// Cubic implements the cubic algorithm from TCP +type Cubic struct { + clock Clock + + // Number of connections to simulate. + numConnections int + + // Time when this cycle started, after last loss event. + epoch time.Time + + // Max congestion window used just before last loss event. + // Note: to improve fairness to other streams an additional back off is + // applied to this value if the new value is below our latest value. + lastMaxCongestionWindow protocol.ByteCount + + // Number of acked bytes since the cycle started (epoch). + ackedBytesCount protocol.ByteCount + + // TCP Reno equivalent congestion window in packets. + estimatedTCPcongestionWindow protocol.ByteCount + + // Origin point of cubic function. + originPointCongestionWindow protocol.ByteCount + + // Time to origin point of cubic function in 2^10 fractions of a second. + timeToOriginPoint uint32 + + // Last congestion window in packets computed by cubic function. + lastTargetCongestionWindow protocol.ByteCount +} + +// NewCubic returns a new Cubic instance +func NewCubic(clock Clock) *Cubic { + c := &Cubic{ + clock: clock, + numConnections: defaultNumConnections, + } + c.Reset() + return c +} + +// Reset is called after a timeout to reset the cubic state +func (c *Cubic) Reset() { + c.epoch = time.Time{} + c.lastMaxCongestionWindow = 0 + c.ackedBytesCount = 0 + c.estimatedTCPcongestionWindow = 0 + c.originPointCongestionWindow = 0 + c.timeToOriginPoint = 0 + c.lastTargetCongestionWindow = 0 +} + +func (c *Cubic) alpha() float32 { + // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that + // beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
+ // We derive the equivalent alpha for an N-connection emulation as: + b := c.beta() + return 3 * float32(c.numConnections) * float32(c.numConnections) * (1 - b) / (1 + b) +} + +func (c *Cubic) beta() float32 { + // kNConnectionBeta is the backoff factor after loss for our N-connection + // emulation, which emulates the effective backoff of an ensemble of N + // TCP-Reno connections on a single loss event. The effective multiplier is + // computed as: + return (float32(c.numConnections) - 1 + beta) / float32(c.numConnections) +} + +func (c *Cubic) betaLastMax() float32 { + // betaLastMax is the additional backoff factor after loss for our + // N-connection emulation, which emulates the additional backoff of + // an ensemble of N TCP-Reno connections on a single loss event. The + // effective multiplier is computed as: + return (float32(c.numConnections) - 1 + betaLastMax) / float32(c.numConnections) +} + +// OnApplicationLimited is called on ack arrival when sender is unable to use +// the available congestion window. Resets Cubic state during quiescence. +func (c *Cubic) OnApplicationLimited() { + // When sender is not using the available congestion window, the window does + // not grow. But to be RTT-independent, Cubic assumes that the sender has been + // using the entire window during the time since the beginning of the current + // "epoch" (the end of the last loss recovery period). Since + // application-limited periods break this assumption, we reset the epoch when + // in such a period. This reset effectively freezes congestion window growth + // through application-limited periods and allows Cubic growth to continue + // when the entire window is being used. + c.epoch = time.Time{} +} + +// CongestionWindowAfterPacketLoss computes a new congestion window to use after +// a loss event. Returns the new congestion window in packets. The new +// congestion window is a multiplicative decrease of our current window. +func (c *Cubic) CongestionWindowAfterPacketLoss(currentCongestionWindow protocol.ByteCount) protocol.ByteCount { + if currentCongestionWindow+maxDatagramSize < c.lastMaxCongestionWindow { + // We never reached the old max, so assume we are competing with another + // flow. Use our extra back off factor to allow the other flow to go up. + c.lastMaxCongestionWindow = protocol.ByteCount(c.betaLastMax() * float32(currentCongestionWindow)) + } else { + c.lastMaxCongestionWindow = currentCongestionWindow + } + c.epoch = time.Time{} // Reset time. + return protocol.ByteCount(float32(currentCongestionWindow) * c.beta()) +} + +// CongestionWindowAfterAck computes a new congestion window to use after a received ACK. +// Returns the new congestion window in packets. The new congestion window +// follows a cubic function that depends on the time passed since last +// packet loss. +func (c *Cubic) CongestionWindowAfterAck( + ackedBytes protocol.ByteCount, + currentCongestionWindow protocol.ByteCount, + delayMin time.Duration, + eventTime time.Time, +) protocol.ByteCount { + c.ackedBytesCount += ackedBytes + + if c.epoch.IsZero() { + // First ACK after a loss event. + c.epoch = eventTime // Start of epoch. + c.ackedBytesCount = ackedBytes // Reset count. + // Reset estimated_tcp_congestion_window_ to be in sync with cubic. 
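+ // (In the else-branch below, timeToOriginPoint is the CUBIC constant + // K = cbrt(cubeFactor * (lastMaxCongestionWindow - cwnd)), expressed in 2^10 + // fractions of a second: the time the cubic curve needs to grow back to the + // pre-loss maximum.)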
+ c.estimatedTCPcongestionWindow = currentCongestionWindow + if c.lastMaxCongestionWindow <= currentCongestionWindow { + c.timeToOriginPoint = 0 + c.originPointCongestionWindow = currentCongestionWindow + } else { + c.timeToOriginPoint = uint32(math.Cbrt(float64(cubeFactor * (c.lastMaxCongestionWindow - currentCongestionWindow)))) + c.originPointCongestionWindow = c.lastMaxCongestionWindow + } + } + + // Change the time unit from microseconds to 2^10 fractions per second. Take + // the round trip time into account. This is done to allow us to use shift as a + // divide operator. + elapsedTime := int64(eventTime.Add(delayMin).Sub(c.epoch)/time.Microsecond) << 10 / (1000 * 1000) + + // Right-shifts of negative, signed numbers have implementation-dependent + // behavior, so force the offset to be positive, as is done in the kernel. + offset := int64(c.timeToOriginPoint) - elapsedTime + if offset < 0 { + offset = -offset + } + + deltaCongestionWindow := protocol.ByteCount(cubeCongestionWindowScale*offset*offset*offset) * maxDatagramSize >> cubeScale + var targetCongestionWindow protocol.ByteCount + if elapsedTime > int64(c.timeToOriginPoint) { + targetCongestionWindow = c.originPointCongestionWindow + deltaCongestionWindow + } else { + targetCongestionWindow = c.originPointCongestionWindow - deltaCongestionWindow + } + // Limit the CWND increase to half the acked bytes. + targetCongestionWindow = min(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2) + + // Increase the window by approximately Alpha * 1 MSS of bytes every + // time we ack an estimated tcp window of bytes. For small + // congestion windows (less than 25), the formula below will + // increase slightly slower than linearly per estimated tcp window + // of bytes. + c.estimatedTCPcongestionWindow += protocol.ByteCount(float32(c.ackedBytesCount) * c.alpha() * float32(maxDatagramSize) / float32(c.estimatedTCPcongestionWindow)) + c.ackedBytesCount = 0 + + // We have a new cubic congestion window. + c.lastTargetCongestionWindow = targetCongestionWindow + + // Compute target congestion_window based on cubic target and estimated TCP + // congestion_window, use highest (fastest). + if targetCongestionWindow < c.estimatedTCPcongestionWindow { + targetCongestionWindow = c.estimatedTCPcongestionWindow + } + return targetCongestionWindow +} + +// SetNumConnections sets the number of emulated connections +func (c *Cubic) SetNumConnections(n int) { + c.numConnections = n +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go new file mode 100644 index 00000000..075b08e0 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go @@ -0,0 +1,316 @@ +package congestion + +import ( + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" +) + +const ( + // initialMaxDatagramSize is the default maximum packet size used in the Linux TCP implementation. + // Used in QUIC for congestion window computations in bytes. + initialMaxDatagramSize = protocol.ByteCount(protocol.InitialPacketSize) + maxBurstPackets = 3 + renoBeta = 0.7 // Reno backoff factor. + minCongestionWindowPackets = 2 + initialCongestionWindow = 32 +) + +type cubicSender struct { + hybridSlowStart HybridSlowStart + rttStats *utils.RTTStats + cubic *Cubic + pacer *pacer + clock Clock + + reno bool + + // Track the largest packet that has been sent.
+ largestSentPacketNumber protocol.PacketNumber + + // Track the largest packet that has been acked. + largestAckedPacketNumber protocol.PacketNumber + + // Track the largest packet number outstanding when a CWND cutback occurs. + largestSentAtLastCutback protocol.PacketNumber + + // Whether the last loss event caused us to exit slowstart. + // Used for stats collection of slowstartPacketsLost + lastCutbackExitedSlowstart bool + + // Congestion window in bytes. + congestionWindow protocol.ByteCount + + // Slow start congestion window in bytes, aka ssthresh. + slowStartThreshold protocol.ByteCount + + // ACK counter for the Reno implementation. + numAckedPackets uint64 + + initialCongestionWindow protocol.ByteCount + initialMaxCongestionWindow protocol.ByteCount + + maxDatagramSize protocol.ByteCount + + lastState logging.CongestionState + tracer *logging.ConnectionTracer +} + +var ( + _ SendAlgorithm = &cubicSender{} + _ SendAlgorithmWithDebugInfos = &cubicSender{} +) + +// NewCubicSender makes a new cubic sender +func NewCubicSender( + clock Clock, + rttStats *utils.RTTStats, + initialMaxDatagramSize protocol.ByteCount, + reno bool, + tracer *logging.ConnectionTracer, +) *cubicSender { + return newCubicSender( + clock, + rttStats, + reno, + initialMaxDatagramSize, + initialCongestionWindow*initialMaxDatagramSize, + protocol.MaxCongestionWindowPackets*initialMaxDatagramSize, + tracer, + ) +} + +func newCubicSender( + clock Clock, + rttStats *utils.RTTStats, + reno bool, + initialMaxDatagramSize, + initialCongestionWindow, + initialMaxCongestionWindow protocol.ByteCount, + tracer *logging.ConnectionTracer, +) *cubicSender { + c := &cubicSender{ + rttStats: rttStats, + largestSentPacketNumber: protocol.InvalidPacketNumber, + largestAckedPacketNumber: protocol.InvalidPacketNumber, + largestSentAtLastCutback: protocol.InvalidPacketNumber, + initialCongestionWindow: initialCongestionWindow, + initialMaxCongestionWindow: initialMaxCongestionWindow, + congestionWindow: initialCongestionWindow, + slowStartThreshold: protocol.MaxByteCount, + cubic: NewCubic(clock), + clock: clock, + reno: reno, + tracer: tracer, + maxDatagramSize: initialMaxDatagramSize, + } + c.pacer = newPacer(c.BandwidthEstimate) + if c.tracer != nil && c.tracer.UpdatedCongestionState != nil { + c.lastState = logging.CongestionStateSlowStart + c.tracer.UpdatedCongestionState(logging.CongestionStateSlowStart) + } + return c +} + +// TimeUntilSend returns when the next packet should be sent. 
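+// It defers to the pacer's token bucket: a zero time.Time means the pacing budget +// already allows sending immediately.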
+func (c *cubicSender) TimeUntilSend(_ protocol.ByteCount) time.Time { + return c.pacer.TimeUntilSend() +} + +func (c *cubicSender) HasPacingBudget(now time.Time) bool { + return c.pacer.Budget(now) >= c.maxDatagramSize +} + +func (c *cubicSender) maxCongestionWindow() protocol.ByteCount { + return c.maxDatagramSize * protocol.MaxCongestionWindowPackets +} + +func (c *cubicSender) minCongestionWindow() protocol.ByteCount { + return c.maxDatagramSize * minCongestionWindowPackets +} + +func (c *cubicSender) OnPacketSent( + sentTime time.Time, + _ protocol.ByteCount, + packetNumber protocol.PacketNumber, + bytes protocol.ByteCount, + isRetransmittable bool, +) { + c.pacer.SentPacket(sentTime, bytes) + if !isRetransmittable { + return + } + c.largestSentPacketNumber = packetNumber + c.hybridSlowStart.OnPacketSent(packetNumber) +} + +func (c *cubicSender) CanSend(bytesInFlight protocol.ByteCount) bool { + return bytesInFlight < c.GetCongestionWindow() +} + +func (c *cubicSender) InRecovery() bool { + return c.largestAckedPacketNumber != protocol.InvalidPacketNumber && c.largestAckedPacketNumber <= c.largestSentAtLastCutback +} + +func (c *cubicSender) InSlowStart() bool { + return c.GetCongestionWindow() < c.slowStartThreshold +} + +func (c *cubicSender) GetCongestionWindow() protocol.ByteCount { + return c.congestionWindow +} + +func (c *cubicSender) MaybeExitSlowStart() { + if c.InSlowStart() && + c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/c.maxDatagramSize) { + // exit slow start + c.slowStartThreshold = c.congestionWindow + c.maybeTraceStateChange(logging.CongestionStateCongestionAvoidance) + } +} + +func (c *cubicSender) OnPacketAcked( + ackedPacketNumber protocol.PacketNumber, + ackedBytes protocol.ByteCount, + priorInFlight protocol.ByteCount, + eventTime time.Time, +) { + c.largestAckedPacketNumber = max(ackedPacketNumber, c.largestAckedPacketNumber) + if c.InRecovery() { + return + } + c.maybeIncreaseCwnd(ackedPacketNumber, ackedBytes, priorInFlight, eventTime) + if c.InSlowStart() { + c.hybridSlowStart.OnPacketAcked(ackedPacketNumber) + } +} + +func (c *cubicSender) OnCongestionEvent(packetNumber protocol.PacketNumber, lostBytes, priorInFlight protocol.ByteCount) { + // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets + // already sent should be treated as a single loss event, since it's expected. + if packetNumber <= c.largestSentAtLastCutback { + return + } + c.lastCutbackExitedSlowstart = c.InSlowStart() + c.maybeTraceStateChange(logging.CongestionStateRecovery) + + if c.reno { + c.congestionWindow = protocol.ByteCount(float64(c.congestionWindow) * renoBeta) + } else { + c.congestionWindow = c.cubic.CongestionWindowAfterPacketLoss(c.congestionWindow) + } + if minCwnd := c.minCongestionWindow(); c.congestionWindow < minCwnd { + c.congestionWindow = minCwnd + } + c.slowStartThreshold = c.congestionWindow + c.largestSentAtLastCutback = c.largestSentPacketNumber + // reset packet count from congestion avoidance mode. We start + // counting again when we're out of recovery. + c.numAckedPackets = 0 +} + +// Called when we receive an ack. Normal TCP tracks how many packets one ack +// represents, but quic has a separate ack for each packet. 
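+// (Numeric sketch of the Reno branch below: with a congestion window of 10 datagrams, +// every 10th acked packet grows the window by one maxDatagramSize, i.e. roughly one +// datagram per round trip.)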
+func (c *cubicSender) maybeIncreaseCwnd( + _ protocol.PacketNumber, + ackedBytes protocol.ByteCount, + priorInFlight protocol.ByteCount, + eventTime time.Time, +) { + // Do not increase the congestion window unless the sender is close to using + // the current window. + if !c.isCwndLimited(priorInFlight) { + c.cubic.OnApplicationLimited() + c.maybeTraceStateChange(logging.CongestionStateApplicationLimited) + return + } + if c.congestionWindow >= c.maxCongestionWindow() { + return + } + if c.InSlowStart() { + // TCP slow start, exponential growth, increase by one for each ACK. + c.congestionWindow += c.maxDatagramSize + c.maybeTraceStateChange(logging.CongestionStateSlowStart) + return + } + // Congestion avoidance + c.maybeTraceStateChange(logging.CongestionStateCongestionAvoidance) + if c.reno { + // Classic Reno congestion avoidance. + c.numAckedPackets++ + if c.numAckedPackets >= uint64(c.congestionWindow/c.maxDatagramSize) { + c.congestionWindow += c.maxDatagramSize + c.numAckedPackets = 0 + } + } else { + c.congestionWindow = min(c.maxCongestionWindow(), c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime)) + } +} + +func (c *cubicSender) isCwndLimited(bytesInFlight protocol.ByteCount) bool { + congestionWindow := c.GetCongestionWindow() + if bytesInFlight >= congestionWindow { + return true + } + availableBytes := congestionWindow - bytesInFlight + slowStartLimited := c.InSlowStart() && bytesInFlight > congestionWindow/2 + return slowStartLimited || availableBytes <= maxBurstPackets*c.maxDatagramSize +} + +// BandwidthEstimate returns the current bandwidth estimate +func (c *cubicSender) BandwidthEstimate() Bandwidth { + srtt := c.rttStats.SmoothedRTT() + if srtt == 0 { + // If we haven't measured an rtt, the bandwidth estimate is unknown. + return infBandwidth + } + return BandwidthFromDelta(c.GetCongestionWindow(), srtt) +} + +// OnRetransmissionTimeout is called on a retransmission timeout +func (c *cubicSender) OnRetransmissionTimeout(packetsRetransmitted bool) { + c.largestSentAtLastCutback = protocol.InvalidPacketNumber + if !packetsRetransmitted { + return + } + c.hybridSlowStart.Restart() + c.cubic.Reset() + c.slowStartThreshold = c.congestionWindow / 2 + c.congestionWindow = c.minCongestionWindow() +} + +// OnConnectionMigration is called when the connection is migrated.
+func (c *cubicSender) OnConnectionMigration() { + c.hybridSlowStart.Restart() + c.largestSentPacketNumber = protocol.InvalidPacketNumber + c.largestAckedPacketNumber = protocol.InvalidPacketNumber + c.largestSentAtLastCutback = protocol.InvalidPacketNumber + c.lastCutbackExitedSlowstart = false + c.cubic.Reset() + c.numAckedPackets = 0 + c.congestionWindow = c.initialCongestionWindow + c.slowStartThreshold = c.initialMaxCongestionWindow +} + +func (c *cubicSender) maybeTraceStateChange(new logging.CongestionState) { + if c.tracer == nil || c.tracer.UpdatedCongestionState == nil || new == c.lastState { + return + } + c.tracer.UpdatedCongestionState(new) + c.lastState = new +} + +func (c *cubicSender) SetMaxDatagramSize(s protocol.ByteCount) { + if s < c.maxDatagramSize { + panic(fmt.Sprintf("congestion BUG: decreased max datagram size from %d to %d", c.maxDatagramSize, s)) + } + cwndIsMinCwnd := c.congestionWindow == c.minCongestionWindow() + c.maxDatagramSize = s + if cwndIsMinCwnd { + c.congestionWindow = c.minCongestionWindow() + } + c.pacer.SetMaxDatagramSize(s) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go b/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go new file mode 100644 index 00000000..9679d9e4 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go @@ -0,0 +1,112 @@ +package congestion + +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// Note(pwestin): the magic clamping numbers come from the original code in +// tcp_cubic.c. +const hybridStartLowWindow = protocol.ByteCount(16) + +// Number of delay samples for detecting the increase of delay. +const hybridStartMinSamples = uint32(8) + +// Exit slow start if the min rtt has increased by more than 1/8th. +const hybridStartDelayFactorExp = 3 // 2^3 = 8 +// The original paper specifies 2 and 8ms, but those have changed over time. +const ( + hybridStartDelayMinThresholdUs = int64(4000) + hybridStartDelayMaxThresholdUs = int64(16000) +) + +// HybridSlowStart implements the TCP hybrid slow start algorithm +type HybridSlowStart struct { + endPacketNumber protocol.PacketNumber + lastSentPacketNumber protocol.PacketNumber + started bool + currentMinRTT time.Duration + rttSampleCount uint32 + hystartFound bool +} + +// StartReceiveRound is called for the start of each receive round (burst) in the slow start phase. +func (s *HybridSlowStart) StartReceiveRound(lastSent protocol.PacketNumber) { + s.endPacketNumber = lastSent + s.currentMinRTT = 0 + s.rttSampleCount = 0 + s.started = true +} + +// IsEndOfRound returns true if this ack is the last packet number of our current slow start round. +func (s *HybridSlowStart) IsEndOfRound(ack protocol.PacketNumber) bool { + return s.endPacketNumber < ack +} + +// ShouldExitSlowStart should be called on every new ack frame, since a new +// RTT measurement can be made then. +// rtt: the RTT for this ack packet. +// minRTT: is the lowest delay (RTT) we have seen during the session. +// congestionWindow: the congestion window in packets. +func (s *HybridSlowStart) ShouldExitSlowStart(latestRTT time.Duration, minRTT time.Duration, congestionWindow protocol.ByteCount) bool { + if !s.started { + // Time to start the hybrid slow start. + s.StartReceiveRound(s.lastSentPacketNumber) + } + if s.hystartFound { + return true + } + // Second detection parameter - delay increase detection. 
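+ // (Numeric sketch: with minRTT = 64ms, the threshold computed below is 64ms/8 = 8ms, + // already inside the [4ms, 16ms] clamp, so slow start is exited once a burst's + // minimum RTT exceeds 72ms.)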
+ // Compare the minimum delay (s.currentMinRTT) of the current + // burst of packets relative to the minimum delay during the session. + // Note: we only look at the first few (8) packets in each burst, since we + // only want to compare the lowest RTT of the burst relative to previous + // bursts. + s.rttSampleCount++ + if s.rttSampleCount <= hybridStartMinSamples { + if s.currentMinRTT == 0 || s.currentMinRTT > latestRTT { + s.currentMinRTT = latestRTT + } + } + // We only need to check this once per round. + if s.rttSampleCount == hybridStartMinSamples { + // Divide minRTT by 8 to get an RTT increase threshold for exiting. + minRTTincreaseThresholdUs := int64(minRTT / time.Microsecond >> hybridStartDelayFactorExp) + // Ensure the rtt threshold is never less than 4ms or more than 16ms. + minRTTincreaseThresholdUs = min(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs) + minRTTincreaseThreshold := time.Duration(max(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond + + if s.currentMinRTT > (minRTT + minRTTincreaseThreshold) { + s.hystartFound = true + } + } + // Exit from slow start if the cwnd is at least 16 packets and + // increasing delay is found. + return congestionWindow >= hybridStartLowWindow && s.hystartFound +} + +// OnPacketSent is called when a packet was sent +func (s *HybridSlowStart) OnPacketSent(packetNumber protocol.PacketNumber) { + s.lastSentPacketNumber = packetNumber +} + +// OnPacketAcked gets invoked after ShouldExitSlowStart, so it's best to end +// the round when the final packet of the burst is received and start it on +// the next incoming ack. +func (s *HybridSlowStart) OnPacketAcked(ackedPacketNumber protocol.PacketNumber) { + if s.IsEndOfRound(ackedPacketNumber) { + s.started = false + } +} + +// Started returns true if started +func (s *HybridSlowStart) Started() bool { + return s.started +} + +// Restart the slow start phase +func (s *HybridSlowStart) Restart() { + s.started = false + s.hystartFound = false +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go b/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go new file mode 100644 index 00000000..881f453b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go @@ -0,0 +1,28 @@ +package congestion + +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// A SendAlgorithm performs congestion control +type SendAlgorithm interface { + TimeUntilSend(bytesInFlight protocol.ByteCount) time.Time + HasPacingBudget(now time.Time) bool + OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool) + CanSend(bytesInFlight protocol.ByteCount) bool + MaybeExitSlowStart() + OnPacketAcked(number protocol.PacketNumber, ackedBytes protocol.ByteCount, priorInFlight protocol.ByteCount, eventTime time.Time) + OnCongestionEvent(number protocol.PacketNumber, lostBytes protocol.ByteCount, priorInFlight protocol.ByteCount) + OnRetransmissionTimeout(packetsRetransmitted bool) + SetMaxDatagramSize(protocol.ByteCount) +} + +// A SendAlgorithmWithDebugInfos is a SendAlgorithm that exposes some debug infos +type SendAlgorithmWithDebugInfos interface { + SendAlgorithm + InSlowStart() bool + InRecovery() bool + GetCongestionWindow() protocol.ByteCount +} diff --git a/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go b/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go new file mode 100644
index 00000000..34d3d1d0 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go @@ -0,0 +1,84 @@ +package congestion + +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) + +const maxBurstSizePackets = 10 + +// The pacer implements a token bucket pacing algorithm. +type pacer struct { + budgetAtLastSent protocol.ByteCount + maxDatagramSize protocol.ByteCount + lastSentTime time.Time + adjustedBandwidth func() uint64 // in bytes/s +} + +func newPacer(getBandwidth func() Bandwidth) *pacer { + p := &pacer{ + maxDatagramSize: initialMaxDatagramSize, + adjustedBandwidth: func() uint64 { + // Bandwidth is in bits/s. We need the value in bytes/s. + bw := uint64(getBandwidth() / BytesPerSecond) + // Use a slightly higher value than the actual measured bandwidth. + // RTT variations then won't result in under-utilization of the congestion window. + // Ultimately, this will result in sending packets as acknowledgments are received rather than when timers fire, + // provided the congestion window is fully utilized and acknowledgments arrive at regular intervals. + return bw * 5 / 4 + }, + } + p.budgetAtLastSent = p.maxBurstSize() + return p +} + +func (p *pacer) SentPacket(sendTime time.Time, size protocol.ByteCount) { + budget := p.Budget(sendTime) + if size >= budget { + p.budgetAtLastSent = 0 + } else { + p.budgetAtLastSent = budget - size + } + p.lastSentTime = sendTime +} + +func (p *pacer) Budget(now time.Time) protocol.ByteCount { + if p.lastSentTime.IsZero() { + return p.maxBurstSize() + } + budget := p.budgetAtLastSent + (protocol.ByteCount(p.adjustedBandwidth())*protocol.ByteCount(now.Sub(p.lastSentTime).Nanoseconds()))/1e9 + if budget < 0 { // protect against overflows + budget = protocol.MaxByteCount + } + return min(p.maxBurstSize(), budget) +} + +func (p *pacer) maxBurstSize() protocol.ByteCount { + return max( + protocol.ByteCount(uint64((protocol.MinPacingDelay+protocol.TimerGranularity).Nanoseconds())*p.adjustedBandwidth())/1e9, + maxBurstSizePackets*p.maxDatagramSize, + ) +} + +// TimeUntilSend returns when the next packet should be sent. +// It returns the zero value of time.Time if a packet can be sent immediately. +func (p *pacer) TimeUntilSend() time.Time { + if p.budgetAtLastSent >= p.maxDatagramSize { + return time.Time{} + } + diff := 1e9 * uint64(p.maxDatagramSize-p.budgetAtLastSent) + bw := p.adjustedBandwidth() + // We might need to round up this value. + // Otherwise, we might have a budget (slightly) smaller than the datagram size when the timer expires. 
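+ // (Integer-math sketch: a 1000-byte shortfall at an adjusted bandwidth of 5 MB/s + // gives diff = 1e9 * 1000, so d = 1e12 / 5e6 = 200000ns = 200µs, which is then + // floored by protocol.MinPacingDelay.)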
+ d := diff / bw + // this is effectively a math.Ceil, but using only integer math + if diff%bw > 0 { + d++ + } + return p.lastSentTime.Add(max(protocol.MinPacingDelay, time.Duration(d)*time.Nanosecond)) +} + +func (p *pacer) SetMaxDatagramSize(s protocol.ByteCount) { + p.maxDatagramSize = s +} diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go new file mode 100644 index 00000000..3d88d577 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go @@ -0,0 +1,127 @@ +package flowcontrol + +import ( + "sync" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +type baseFlowController struct { + // for sending data + bytesSent protocol.ByteCount + sendWindow protocol.ByteCount + lastBlockedAt protocol.ByteCount + + // for receiving data + //nolint:structcheck // The mutex is used both by the stream and the connection flow controller + mutex sync.Mutex + bytesRead protocol.ByteCount + highestReceived protocol.ByteCount + receiveWindow protocol.ByteCount + receiveWindowSize protocol.ByteCount + maxReceiveWindowSize protocol.ByteCount + + allowWindowIncrease func(size protocol.ByteCount) bool + + epochStartTime time.Time + epochStartOffset protocol.ByteCount + rttStats *utils.RTTStats + + logger utils.Logger +} + +// IsNewlyBlocked says if it is newly blocked by flow control. +// For every offset, it only returns true once. +// If it is blocked, the offset is returned. +func (c *baseFlowController) IsNewlyBlocked() (bool, protocol.ByteCount) { + if c.sendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt { + return false, 0 + } + c.lastBlockedAt = c.sendWindow + return true, c.sendWindow +} + +func (c *baseFlowController) AddBytesSent(n protocol.ByteCount) { + c.bytesSent += n +} + +// UpdateSendWindow is called after receiving a MAX_{STREAM_}DATA frame. +func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) (updated bool) { + if offset > c.sendWindow { + c.sendWindow = offset + return true + } + return false +} + +func (c *baseFlowController) sendWindowSize() protocol.ByteCount { + // this only happens during connection establishment, when data is sent before we receive the peer's transport parameters + if c.bytesSent > c.sendWindow { + return 0 + } + return c.sendWindow - c.bytesSent +} + +// needs to be called with locked mutex +func (c *baseFlowController) addBytesRead(n protocol.ByteCount) { + // pretend we sent a WindowUpdate when reading the first byte + // this way auto-tuning of the window size already works for the first WindowUpdate + if c.bytesRead == 0 { + c.startNewAutoTuningEpoch(time.Now()) + } + c.bytesRead += n +} + +func (c *baseFlowController) hasWindowUpdate() bool { + bytesRemaining := c.receiveWindow - c.bytesRead + // update the window when more than the threshold was consumed + return bytesRemaining <= protocol.ByteCount(float64(c.receiveWindowSize)*(1-protocol.WindowUpdateThreshold)) +} + +// getWindowUpdate updates the receive window, if necessary +// it returns the new offset +func (c *baseFlowController) getWindowUpdate() protocol.ByteCount { + if !c.hasWindowUpdate() { + return 0 + } + + c.maybeAdjustWindowSize() + c.receiveWindow = c.bytesRead + c.receiveWindowSize + return c.receiveWindow +} + +// maybeAdjustWindowSize increases the receiveWindowSize if we're sending updates too often. 
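+// (Numeric sketch: if 48kB of a 64kB window were consumed, fraction = 0.75; when that +// took less than 4 * 0.75 = 3 RTTs, the window size is doubled, capped at +// maxReceiveWindowSize.)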
+// For details about auto-tuning, see https://docs.google.com/document/d/1SExkMmGiz8VYzV3s9E35JQlJ73vhzCekKkDi85F1qCE/edit?usp=sharing. +func (c *baseFlowController) maybeAdjustWindowSize() { + bytesReadInEpoch := c.bytesRead - c.epochStartOffset + // don't do anything if less than half the window has been consumed + if bytesReadInEpoch <= c.receiveWindowSize/2 { + return + } + rtt := c.rttStats.SmoothedRTT() + if rtt == 0 { + return + } + + fraction := float64(bytesReadInEpoch) / float64(c.receiveWindowSize) + now := time.Now() + if now.Sub(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) { + // window is consumed too fast, try to increase the window size + newSize := min(2*c.receiveWindowSize, c.maxReceiveWindowSize) + if newSize > c.receiveWindowSize && (c.allowWindowIncrease == nil || c.allowWindowIncrease(newSize-c.receiveWindowSize)) { + c.receiveWindowSize = newSize + } + } + c.startNewAutoTuningEpoch(now) +} + +func (c *baseFlowController) startNewAutoTuningEpoch(now time.Time) { + c.epochStartTime = now + c.epochStartOffset = c.bytesRead +} + +func (c *baseFlowController) checkFlowControlViolation() bool { + return c.highestReceived > c.receiveWindow +} diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go new file mode 100644 index 00000000..2efcad74 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go @@ -0,0 +1,104 @@ +package flowcontrol + +import ( + "errors" + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" +) + +type connectionFlowController struct { + baseFlowController +} + +var _ ConnectionFlowController = &connectionFlowController{} + +// NewConnectionFlowController gets a new flow controller for the connection +// It is created before we receive the peer's transport parameters, thus it starts with a sendWindow of 0. 
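+// Until then a send window of 0 blocks all sending; it is raised via UpdateSendWindow, +// typically once the peer's initial_max_data transport parameter becomes known.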
+func NewConnectionFlowController( + receiveWindow protocol.ByteCount, + maxReceiveWindow protocol.ByteCount, + allowWindowIncrease func(size protocol.ByteCount) bool, + rttStats *utils.RTTStats, + logger utils.Logger, +) ConnectionFlowController { + return &connectionFlowController{ + baseFlowController: baseFlowController{ + rttStats: rttStats, + receiveWindow: receiveWindow, + receiveWindowSize: receiveWindow, + maxReceiveWindowSize: maxReceiveWindow, + allowWindowIncrease: allowWindowIncrease, + logger: logger, + }, + } +} + +func (c *connectionFlowController) SendWindowSize() protocol.ByteCount { + return c.baseFlowController.sendWindowSize() +} + +// IncrementHighestReceived adds an increment to the highestReceived value +func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount) error { + c.mutex.Lock() + defer c.mutex.Unlock() + + c.highestReceived += increment + if c.checkFlowControlViolation() { + return &qerr.TransportError{ + ErrorCode: qerr.FlowControlError, + ErrorMessage: fmt.Sprintf("received %d bytes for the connection, allowed %d bytes", c.highestReceived, c.receiveWindow), + } + } + return nil +} + +func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) { + c.mutex.Lock() + c.baseFlowController.addBytesRead(n) + c.mutex.Unlock() +} + +func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount { + c.mutex.Lock() + oldWindowSize := c.receiveWindowSize + offset := c.baseFlowController.getWindowUpdate() + if c.logger.Debug() && oldWindowSize < c.receiveWindowSize { + c.logger.Debugf("Increasing receive flow control window for the connection to %d kB", c.receiveWindowSize/(1<<10)) + } + c.mutex.Unlock() + return offset +} + +// EnsureMinimumWindowSize sets a minimum window size. +// It should make sure that the connection-level window is increased when a stream-level window grows. +func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) { + c.mutex.Lock() + if inc > c.receiveWindowSize { + c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10)) + newSize := min(inc, c.maxReceiveWindowSize) + if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) { + c.receiveWindowSize = newSize + } + c.startNewAutoTuningEpoch(time.Now()) + } + c.mutex.Unlock() +} + +// Reset resets the flow controller. This happens when 0-RTT is rejected. +// All stream data is invalidated, it's as if we had never opened a stream and never sent any data. +// At that point, we had only sent 0-RTT stream data, since we didn't yet have the keys to send 1-RTT packets.
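+// Reset therefore only rewinds the send-side counters; it refuses to run once any +// data has been received or read.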
+func (c *connectionFlowController) Reset() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + if c.bytesRead > 0 || c.highestReceived > 0 || !c.epochStartTime.IsZero() { + return errors.New("flow controller reset after reading data") + } + c.bytesSent = 0 + c.lastBlockedAt = 0 + return nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go new file mode 100644 index 00000000..57d12a95 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go @@ -0,0 +1,43 @@ +package flowcontrol + +import "github.com/quic-go/quic-go/internal/protocol" + +type flowController interface { + // for sending + SendWindowSize() protocol.ByteCount + UpdateSendWindow(protocol.ByteCount) (updated bool) + AddBytesSent(protocol.ByteCount) + // for receiving + GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary +} + +// A StreamFlowController is a flow controller for a QUIC stream. +type StreamFlowController interface { + flowController + AddBytesRead(protocol.ByteCount) (shouldQueueWindowUpdate bool) + // UpdateHighestReceived is called when a new highest offset is received. + // final has to be true if this is the final offset of the stream, + // as contained in a STREAM frame with the FIN bit set, or in a RESET_STREAM frame. + UpdateHighestReceived(offset protocol.ByteCount, final bool) error + // Abandon is called when reading from the stream is aborted early, + // and there won't be any further calls to AddBytesRead. + Abandon() + IsNewlyBlocked() bool +} + +// The ConnectionFlowController is the flow controller for the connection. +type ConnectionFlowController interface { + flowController + AddBytesRead(protocol.ByteCount) + Reset() error + IsNewlyBlocked() (bool, protocol.ByteCount) +} + +type connectionFlowControllerI interface { + ConnectionFlowController + // The following two methods are not supposed to be called from outside this package, but are needed internally + // for sending + EnsureMinimumWindowSize(protocol.ByteCount) + // for receiving + IncrementHighestReceived(protocol.ByteCount) error +} diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go new file mode 100644 index 00000000..2d58351c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go @@ -0,0 +1,149 @@ +package flowcontrol + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" +) + +type streamFlowController struct { + baseFlowController + + streamID protocol.StreamID + + connection connectionFlowControllerI + + receivedFinalOffset bool +} + +var _ StreamFlowController = &streamFlowController{} + +// NewStreamFlowController gets a new flow controller for a stream +func NewStreamFlowController( + streamID protocol.StreamID, + cfc ConnectionFlowController, + receiveWindow protocol.ByteCount, + maxReceiveWindow protocol.ByteCount, + initialSendWindow protocol.ByteCount, + rttStats *utils.RTTStats, + logger utils.Logger, +) StreamFlowController { + return &streamFlowController{ + streamID: streamID, + connection: cfc.(connectionFlowControllerI), + baseFlowController: baseFlowController{ + rttStats: rttStats, + receiveWindow: receiveWindow, + receiveWindowSize: receiveWindow, + maxReceiveWindowSize: maxReceiveWindow, +
sendWindow: initialSendWindow, + logger: logger, + }, + } +} + +// UpdateHighestReceived updates the highestReceived value, if the offset is higher. +func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool) error { + // If the final offset for this stream is already known, check for consistency. + if c.receivedFinalOffset { + // If we receive another final offset, check that it's the same. + if final && offset != c.highestReceived { + return &qerr.TransportError{ + ErrorCode: qerr.FinalSizeError, + ErrorMessage: fmt.Sprintf("received inconsistent final offset for stream %d (old: %d, new: %d bytes)", c.streamID, c.highestReceived, offset), + } + } + // Check that the offset is below the final offset. + if offset > c.highestReceived { + return &qerr.TransportError{ + ErrorCode: qerr.FinalSizeError, + ErrorMessage: fmt.Sprintf("received offset %d for stream %d, but final offset was already received at %d", offset, c.streamID, c.highestReceived), + } + } + } + + if final { + c.receivedFinalOffset = true + } + if offset == c.highestReceived { + return nil + } + // A higher offset was received before. + // This can happen due to reordering. + if offset <= c.highestReceived { + if final { + return &qerr.TransportError{ + ErrorCode: qerr.FinalSizeError, + ErrorMessage: fmt.Sprintf("received final offset %d for stream %d, but already received offset %d before", offset, c.streamID, c.highestReceived), + } + } + return nil + } + + increment := offset - c.highestReceived + c.highestReceived = offset + if c.checkFlowControlViolation() { + return &qerr.TransportError{ + ErrorCode: qerr.FlowControlError, + ErrorMessage: fmt.Sprintf("received %d bytes on stream %d, allowed %d bytes", offset, c.streamID, c.receiveWindow), + } + } + return c.connection.IncrementHighestReceived(increment) +} + +func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (shouldQueueWindowUpdate bool) { + c.mutex.Lock() + c.baseFlowController.addBytesRead(n) + shouldQueueWindowUpdate = c.shouldQueueWindowUpdate() + c.mutex.Unlock() + c.connection.AddBytesRead(n) + return +} + +func (c *streamFlowController) Abandon() { + c.mutex.Lock() + unread := c.highestReceived - c.bytesRead + c.bytesRead = c.highestReceived + c.mutex.Unlock() + if unread > 0 { + c.connection.AddBytesRead(unread) + } +} + +func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) { + c.baseFlowController.AddBytesSent(n) + c.connection.AddBytesSent(n) +} + +func (c *streamFlowController) SendWindowSize() protocol.ByteCount { + return min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize()) +} + +func (c *streamFlowController) IsNewlyBlocked() bool { + blocked, _ := c.baseFlowController.IsNewlyBlocked() + return blocked +} + +func (c *streamFlowController) shouldQueueWindowUpdate() bool { + return !c.receivedFinalOffset && c.hasWindowUpdate() +} + +func (c *streamFlowController) GetWindowUpdate() protocol.ByteCount { + // If we already received the final offset for this stream, the peer won't need any additional flow control credit. 
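+ // (When auto-tuning enlarges the stream window below, the connection-level window + // is bumped as well, scaled by protocol.ConnectionFlowControlMultiplier, so the + // connection limit keeps pace with the growing stream limit.)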
+	if c.receivedFinalOffset {
+		return 0
+	}
+
+	// Don't use defer for unlocking the mutex here: GetWindowUpdate() is called frequently,
+	// and defer shows up in the profiler.
+	c.mutex.Lock()
+	oldWindowSize := c.receiveWindowSize
+	offset := c.baseFlowController.getWindowUpdate()
+	if c.receiveWindowSize > oldWindowSize { // auto-tuning enlarged the window size
+		c.logger.Debugf("Increasing receive flow control window for stream %d to %d kB", c.streamID, c.receiveWindowSize/(1<<10))
+		c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize) * protocol.ConnectionFlowControlMultiplier))
+	}
+	c.mutex.Unlock()
+	return offset
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
new file mode 100644
index 00000000..1baf5d6b
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
@@ -0,0 +1,90 @@
+package handshake
+
+import (
+	"encoding/binary"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+)
+
+func createAEAD(suite *cipherSuite, trafficSecret []byte, v protocol.Version) *xorNonceAEAD {
+	keyLabel := hkdfLabelKeyV1
+	ivLabel := hkdfLabelIVV1
+	if v == protocol.Version2 {
+		keyLabel = hkdfLabelKeyV2
+		ivLabel = hkdfLabelIVV2
+	}
+	key := hkdfExpandLabel(suite.Hash, trafficSecret, []byte{}, keyLabel, suite.KeyLen)
+	iv := hkdfExpandLabel(suite.Hash, trafficSecret, []byte{}, ivLabel, suite.IVLen())
+	return suite.AEAD(key, iv)
+}
+
+type longHeaderSealer struct {
+	aead            *xorNonceAEAD
+	headerProtector headerProtector
+	nonceBuf        [8]byte
+}
+
+var _ LongHeaderSealer = &longHeaderSealer{}
+
+func newLongHeaderSealer(aead *xorNonceAEAD, headerProtector headerProtector) LongHeaderSealer {
+	if aead.NonceSize() != 8 {
+		panic("unexpected nonce size")
+	}
+	return &longHeaderSealer{
+		aead:            aead,
+		headerProtector: headerProtector,
+	}
+}
+
+func (s *longHeaderSealer) Seal(dst, src []byte, pn protocol.PacketNumber, ad []byte) []byte {
+	binary.BigEndian.PutUint64(s.nonceBuf[:], uint64(pn))
+	return s.aead.Seal(dst, s.nonceBuf[:], src, ad)
+}
+
+func (s *longHeaderSealer) EncryptHeader(sample []byte, firstByte *byte, pnBytes []byte) {
+	s.headerProtector.EncryptHeader(sample, firstByte, pnBytes)
+}
+
+func (s *longHeaderSealer) Overhead() int {
+	return s.aead.Overhead()
+}
+
+type longHeaderOpener struct {
+	aead            *xorNonceAEAD
+	headerProtector headerProtector
+	highestRcvdPN   protocol.PacketNumber // highest packet number received (which could be successfully unprotected)
+
+	// use a single array to avoid allocations
+	nonceBuf [8]byte
+}
+
+var _ LongHeaderOpener = &longHeaderOpener{}
+
+func newLongHeaderOpener(aead *xorNonceAEAD, headerProtector headerProtector) LongHeaderOpener {
+	if aead.NonceSize() != 8 {
+		panic("unexpected nonce size")
+	}
+	return &longHeaderOpener{
+		aead:            aead,
+		headerProtector: headerProtector,
+	}
+}
+
+func (o *longHeaderOpener) DecodePacketNumber(wirePN protocol.PacketNumber, wirePNLen protocol.PacketNumberLen) protocol.PacketNumber {
+	return protocol.DecodePacketNumber(wirePNLen, o.highestRcvdPN, wirePN)
+}
+
+func (o *longHeaderOpener) Open(dst, src []byte, pn protocol.PacketNumber, ad []byte) ([]byte, error) {
+	binary.BigEndian.PutUint64(o.nonceBuf[:], uint64(pn))
+	dec, err := o.aead.Open(dst, o.nonceBuf[:], src, ad)
+	if err == nil {
+		o.highestRcvdPN = max(o.highestRcvdPN, pn)
+	} else {
+		err = ErrDecryptionFailed
+	}
+	return dec, err
+}
+
+func (o *longHeaderOpener) DecryptHeader(sample []byte, firstByte *byte,
pnBytes []byte) {
+	o.headerProtector.DecryptHeader(sample, firstByte, pnBytes)
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go b/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go
new file mode 100644
index 00000000..d8a381da
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/cipher_suite.go
@@ -0,0 +1,104 @@
+package handshake
+
+import (
+	"crypto"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/tls"
+	"fmt"
+
+	"golang.org/x/crypto/chacha20poly1305"
+)
+
+// These cipher suite implementations are copied from the standard library crypto/tls package.
+
+const aeadNonceLength = 12
+
+type cipherSuite struct {
+	ID     uint16
+	Hash   crypto.Hash
+	KeyLen int
+	AEAD   func(key, nonceMask []byte) *xorNonceAEAD
+}
+
+func (s cipherSuite) IVLen() int { return aeadNonceLength }
+
+func getCipherSuite(id uint16) *cipherSuite {
+	switch id {
+	case tls.TLS_AES_128_GCM_SHA256:
+		return &cipherSuite{ID: tls.TLS_AES_128_GCM_SHA256, Hash: crypto.SHA256, KeyLen: 16, AEAD: aeadAESGCMTLS13}
+	case tls.TLS_CHACHA20_POLY1305_SHA256:
+		return &cipherSuite{ID: tls.TLS_CHACHA20_POLY1305_SHA256, Hash: crypto.SHA256, KeyLen: 32, AEAD: aeadChaCha20Poly1305}
+	case tls.TLS_AES_256_GCM_SHA384:
+		return &cipherSuite{ID: tls.TLS_AES_256_GCM_SHA384, Hash: crypto.SHA384, KeyLen: 32, AEAD: aeadAESGCMTLS13}
+	default:
+		panic(fmt.Sprintf("unknown cipher suite: %d", id))
+	}
+}
+
+func aeadAESGCMTLS13(key, nonceMask []byte) *xorNonceAEAD {
+	if len(nonceMask) != aeadNonceLength {
+		panic("tls: internal error: wrong nonce length")
+	}
+	aes, err := aes.NewCipher(key)
+	if err != nil {
+		panic(err)
+	}
+	aead, err := cipher.NewGCM(aes)
+	if err != nil {
+		panic(err)
+	}
+
+	ret := &xorNonceAEAD{aead: aead}
+	copy(ret.nonceMask[:], nonceMask)
+	return ret
+}
+
+func aeadChaCha20Poly1305(key, nonceMask []byte) *xorNonceAEAD {
+	if len(nonceMask) != aeadNonceLength {
+		panic("tls: internal error: wrong nonce length")
+	}
+	aead, err := chacha20poly1305.New(key)
+	if err != nil {
+		panic(err)
+	}
+
+	ret := &xorNonceAEAD{aead: aead}
+	copy(ret.nonceMask[:], nonceMask)
+	return ret
+}
+
+// xorNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
+// before each call.
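+// Per RFC 9001, Section 5.3, the effective per-packet nonce is the static IV
+// XORed with the packet number, left-padded with zeroes to the IV length.
+// For example (illustrative values only): with IV 0x6b26114b9cba2b63a9e8dd4f
+// and packet number 0x2a, the effective nonce is 0x6b26114b9cba2b63a9e8dd65.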
+type xorNonceAEAD struct { + nonceMask [aeadNonceLength]byte + aead cipher.AEAD +} + +func (f *xorNonceAEAD) NonceSize() int { return 8 } // 64-bit sequence number +func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() } +func (f *xorNonceAEAD) explicitNonceLen() int { return 0 } + +func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte { + for i, b := range nonce { + f.nonceMask[4+i] ^= b + } + result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData) + for i, b := range nonce { + f.nonceMask[4+i] ^= b + } + + return result +} + +func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) { + for i, b := range nonce { + f.nonceMask[4+i] ^= b + } + result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData) + for i, b := range nonce { + f.nonceMask[4+i] ^= b + } + + return result, err +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go new file mode 100644 index 00000000..c8e6cb33 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go @@ -0,0 +1,634 @@ +package handshake + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "strings" + "sync/atomic" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" + "github.com/quic-go/quic-go/quicvarint" +) + +type quicVersionContextKey struct{} + +var QUICVersionContextKey = &quicVersionContextKey{} + +const clientSessionStateRevision = 4 + +type cryptoSetup struct { + tlsConf *tls.Config + conn *tls.QUICConn + + events []Event + + version protocol.Version + + ourParams *wire.TransportParameters + peerParams *wire.TransportParameters + + zeroRTTParameters *wire.TransportParameters + allow0RTT bool + + rttStats *utils.RTTStats + + tracer *logging.ConnectionTracer + logger utils.Logger + + perspective protocol.Perspective + + handshakeCompleteTime time.Time + + zeroRTTOpener LongHeaderOpener // only set for the server + zeroRTTSealer LongHeaderSealer // only set for the client + + initialOpener LongHeaderOpener + initialSealer LongHeaderSealer + + handshakeOpener LongHeaderOpener + handshakeSealer LongHeaderSealer + + used0RTT atomic.Bool + + aead *updatableAEAD + has1RTTSealer bool + has1RTTOpener bool +} + +var _ CryptoSetup = &cryptoSetup{} + +// NewCryptoSetupClient creates a new crypto setup for the client +func NewCryptoSetupClient( + connID protocol.ConnectionID, + tp *wire.TransportParameters, + tlsConf *tls.Config, + enable0RTT bool, + rttStats *utils.RTTStats, + tracer *logging.ConnectionTracer, + logger utils.Logger, + version protocol.Version, +) CryptoSetup { + cs := newCryptoSetup( + connID, + tp, + rttStats, + tracer, + logger, + protocol.PerspectiveClient, + version, + ) + + tlsConf = tlsConf.Clone() + tlsConf.MinVersion = tls.VersionTLS13 + quicConf := &tls.QUICConfig{TLSConfig: tlsConf} + qtls.SetupConfigForClient(quicConf, cs.marshalDataForSessionState, cs.handleDataFromSessionState) + cs.tlsConf = tlsConf + cs.allow0RTT = enable0RTT + + cs.conn = tls.QUICClient(quicConf) + cs.conn.SetTransportParameters(cs.ourParams.Marshal(protocol.PerspectiveClient)) + + return cs +} + +// NewCryptoSetupServer creates a new crypto setup for the server +func NewCryptoSetupServer( + 
connID protocol.ConnectionID, + localAddr, remoteAddr net.Addr, + tp *wire.TransportParameters, + tlsConf *tls.Config, + allow0RTT bool, + rttStats *utils.RTTStats, + tracer *logging.ConnectionTracer, + logger utils.Logger, + version protocol.Version, +) CryptoSetup { + cs := newCryptoSetup( + connID, + tp, + rttStats, + tracer, + logger, + protocol.PerspectiveServer, + version, + ) + cs.allow0RTT = allow0RTT + + tlsConf = qtls.SetupConfigForServer(tlsConf, localAddr, remoteAddr, cs.getDataForSessionTicket, cs.handleSessionTicket) + cs.tlsConf = tlsConf + cs.conn = tls.QUICServer(&tls.QUICConfig{TLSConfig: tlsConf}) + return cs +} + +func newCryptoSetup( + connID protocol.ConnectionID, + tp *wire.TransportParameters, + rttStats *utils.RTTStats, + tracer *logging.ConnectionTracer, + logger utils.Logger, + perspective protocol.Perspective, + version protocol.Version, +) *cryptoSetup { + initialSealer, initialOpener := NewInitialAEAD(connID, perspective, version) + if tracer != nil && tracer.UpdatedKeyFromTLS != nil { + tracer.UpdatedKeyFromTLS(protocol.EncryptionInitial, protocol.PerspectiveClient) + tracer.UpdatedKeyFromTLS(protocol.EncryptionInitial, protocol.PerspectiveServer) + } + return &cryptoSetup{ + initialSealer: initialSealer, + initialOpener: initialOpener, + aead: newUpdatableAEAD(rttStats, tracer, logger, version), + events: make([]Event, 0, 16), + ourParams: tp, + rttStats: rttStats, + tracer: tracer, + logger: logger, + perspective: perspective, + version: version, + } +} + +func (h *cryptoSetup) ChangeConnectionID(id protocol.ConnectionID) { + initialSealer, initialOpener := NewInitialAEAD(id, h.perspective, h.version) + h.initialSealer = initialSealer + h.initialOpener = initialOpener + if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil { + h.tracer.UpdatedKeyFromTLS(protocol.EncryptionInitial, protocol.PerspectiveClient) + h.tracer.UpdatedKeyFromTLS(protocol.EncryptionInitial, protocol.PerspectiveServer) + } +} + +func (h *cryptoSetup) SetLargest1RTTAcked(pn protocol.PacketNumber) error { + return h.aead.SetLargestAcked(pn) +} + +func (h *cryptoSetup) StartHandshake(ctx context.Context) error { + err := h.conn.Start(context.WithValue(ctx, QUICVersionContextKey, h.version)) + if err != nil { + return wrapError(err) + } + for { + ev := h.conn.NextEvent() + done, err := h.handleEvent(ev) + if err != nil { + return wrapError(err) + } + if done { + break + } + } + if h.perspective == protocol.PerspectiveClient { + if h.zeroRTTSealer != nil && h.zeroRTTParameters != nil { + h.logger.Debugf("Doing 0-RTT.") + h.events = append(h.events, Event{Kind: EventRestoredTransportParameters, TransportParameters: h.zeroRTTParameters}) + } else { + h.logger.Debugf("Not doing 0-RTT. Has sealer: %t, has params: %t", h.zeroRTTSealer != nil, h.zeroRTTParameters != nil) + } + } + return nil +} + +// Close closes the crypto setup. +// It aborts the handshake, if it is still running. +func (h *cryptoSetup) Close() error { + return h.conn.Close() +} + +// HandleMessage handles a TLS handshake message. +// It is called by the crypto streams when a new message is available. 
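+// After feeding the data to crypto/tls, it drains conn.NextEvent() until
+// QUICNoEvent is returned, so a single message can produce several entries in h.events.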
+func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLevel) error { + if err := h.handleMessage(data, encLevel); err != nil { + return wrapError(err) + } + return nil +} + +func (h *cryptoSetup) handleMessage(data []byte, encLevel protocol.EncryptionLevel) error { + if err := h.conn.HandleData(qtls.ToTLSEncryptionLevel(encLevel), data); err != nil { + return err + } + for { + ev := h.conn.NextEvent() + done, err := h.handleEvent(ev) + if err != nil { + return err + } + if done { + return nil + } + } +} + +func (h *cryptoSetup) handleEvent(ev tls.QUICEvent) (done bool, err error) { + //nolint:exhaustive + // Go 1.23 added new 0-RTT events, see https://github.com/quic-go/quic-go/issues/4272. + // We will start using these events when dropping support for Go 1.22. + switch ev.Kind { + case tls.QUICNoEvent: + return true, nil + case tls.QUICSetReadSecret: + h.setReadKey(ev.Level, ev.Suite, ev.Data) + return false, nil + case tls.QUICSetWriteSecret: + h.setWriteKey(ev.Level, ev.Suite, ev.Data) + return false, nil + case tls.QUICTransportParameters: + return false, h.handleTransportParameters(ev.Data) + case tls.QUICTransportParametersRequired: + h.conn.SetTransportParameters(h.ourParams.Marshal(h.perspective)) + return false, nil + case tls.QUICRejectedEarlyData: + h.rejected0RTT() + return false, nil + case tls.QUICWriteData: + h.writeRecord(ev.Level, ev.Data) + return false, nil + case tls.QUICHandshakeDone: + h.handshakeComplete() + return false, nil + default: + // Unknown events should be ignored. + // crypto/tls will ensure that this is safe to do. + // See the discussion following https://github.com/golang/go/issues/68124#issuecomment-2187042510 for details. + return false, nil + } +} + +func (h *cryptoSetup) NextEvent() Event { + if len(h.events) == 0 { + return Event{Kind: EventNoEvent} + } + ev := h.events[0] + h.events = h.events[1:] + return ev +} + +func (h *cryptoSetup) handleTransportParameters(data []byte) error { + var tp wire.TransportParameters + if err := tp.Unmarshal(data, h.perspective.Opposite()); err != nil { + return err + } + h.peerParams = &tp + h.events = append(h.events, Event{Kind: EventReceivedTransportParameters, TransportParameters: h.peerParams}) + return nil +} + +// must be called after receiving the transport parameters +func (h *cryptoSetup) marshalDataForSessionState(earlyData bool) []byte { + b := make([]byte, 0, 256) + b = quicvarint.Append(b, clientSessionStateRevision) + b = quicvarint.Append(b, uint64(h.rttStats.SmoothedRTT().Microseconds())) + if earlyData { + // only save the transport parameters for 0-RTT enabled session tickets + return h.peerParams.MarshalForSessionTicket(b) + } + return b +} + +func (h *cryptoSetup) handleDataFromSessionState(data []byte, earlyData bool) (allowEarlyData bool) { + rtt, tp, err := decodeDataFromSessionState(data, earlyData) + if err != nil { + h.logger.Debugf("Restoring of transport parameters from session ticket failed: %s", err.Error()) + return + } + h.rttStats.SetInitialRTT(rtt) + // The session ticket might have been saved from a connection that allowed 0-RTT, + // and therefore contain transport parameters. + // Only use them if 0-RTT is actually used on the new connection. 
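+	// (A ticket carrying transport parameters is not sufficient on its own:
+	// 0-RTT also has to be enabled on this connection.)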
+ if tp != nil && h.allow0RTT { + h.zeroRTTParameters = tp + return true + } + return false +} + +func decodeDataFromSessionState(b []byte, earlyData bool) (time.Duration, *wire.TransportParameters, error) { + ver, l, err := quicvarint.Parse(b) + if err != nil { + return 0, nil, err + } + b = b[l:] + if ver != clientSessionStateRevision { + return 0, nil, fmt.Errorf("mismatching version. Got %d, expected %d", ver, clientSessionStateRevision) + } + rttEncoded, l, err := quicvarint.Parse(b) + if err != nil { + return 0, nil, err + } + b = b[l:] + rtt := time.Duration(rttEncoded) * time.Microsecond + if !earlyData { + return rtt, nil, nil + } + var tp wire.TransportParameters + if err := tp.UnmarshalFromSessionTicket(b); err != nil { + return 0, nil, err + } + return rtt, &tp, nil +} + +func (h *cryptoSetup) getDataForSessionTicket() []byte { + ticket := &sessionTicket{ + RTT: h.rttStats.SmoothedRTT(), + } + if h.allow0RTT { + ticket.Parameters = h.ourParams + } + return ticket.Marshal() +} + +// GetSessionTicket generates a new session ticket. +// Due to limitations in crypto/tls, it's only possible to generate a single session ticket per connection. +// It is only valid for the server. +func (h *cryptoSetup) GetSessionTicket() ([]byte, error) { + if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{EarlyData: h.allow0RTT}); err != nil { + // Session tickets might be disabled by tls.Config.SessionTicketsDisabled. + // We can't check h.tlsConfig here, since the actual config might have been obtained from + // the GetConfigForClient callback. + // See https://github.com/golang/go/issues/62032. + // Once that issue is resolved, this error assertion can be removed. + if strings.Contains(err.Error(), "session ticket keys unavailable") { + return nil, nil + } + return nil, err + } + ev := h.conn.NextEvent() + if ev.Kind != tls.QUICWriteData || ev.Level != tls.QUICEncryptionLevelApplication { + panic("crypto/tls bug: where's my session ticket?") + } + ticket := ev.Data + if ev := h.conn.NextEvent(); ev.Kind != tls.QUICNoEvent { + panic("crypto/tls bug: why more than one ticket?") + } + return ticket, nil +} + +// handleSessionTicket is called for the server when receiving the client's session ticket. +// It reads parameters from the session ticket and checks whether to accept 0-RTT if the session ticket enabled 0-RTT. +// Note that the fact that the session ticket allows 0-RTT doesn't mean that the actual TLS handshake enables 0-RTT: +// A client may use a 0-RTT enabled session to resume a TLS session without using 0-RTT. +func (h *cryptoSetup) handleSessionTicket(sessionTicketData []byte, using0RTT bool) bool { + var t sessionTicket + if err := t.Unmarshal(sessionTicketData, using0RTT); err != nil { + h.logger.Debugf("Unmarshalling session ticket failed: %s", err.Error()) + return false + } + h.rttStats.SetInitialRTT(t.RTT) + if !using0RTT { + return false + } + valid := h.ourParams.ValidFor0RTT(t.Parameters) + if !valid { + h.logger.Debugf("Transport parameters changed. Rejecting 0-RTT.") + return false + } + if !h.allow0RTT { + h.logger.Debugf("0-RTT not allowed. Rejecting 0-RTT.") + return false + } + h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT) + return true +} + +// rejected0RTT is called for the client when the server rejects 0-RTT. +func (h *cryptoSetup) rejected0RTT() { + h.logger.Debugf("0-RTT was rejected. 
Dropping 0-RTT keys.") + + had0RTTKeys := h.zeroRTTSealer != nil + h.zeroRTTSealer = nil + + if had0RTTKeys { + h.events = append(h.events, Event{Kind: EventDiscard0RTTKeys}) + } +} + +func (h *cryptoSetup) setReadKey(el tls.QUICEncryptionLevel, suiteID uint16, trafficSecret []byte) { + suite := getCipherSuite(suiteID) + //nolint:exhaustive // The TLS stack doesn't export Initial keys. + switch el { + case tls.QUICEncryptionLevelEarly: + if h.perspective == protocol.PerspectiveClient { + panic("Received 0-RTT read key for the client") + } + h.zeroRTTOpener = newLongHeaderOpener( + createAEAD(suite, trafficSecret, h.version), + newHeaderProtector(suite, trafficSecret, true, h.version), + ) + h.used0RTT.Store(true) + if h.logger.Debug() { + h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } + case tls.QUICEncryptionLevelHandshake: + h.handshakeOpener = newLongHeaderOpener( + createAEAD(suite, trafficSecret, h.version), + newHeaderProtector(suite, trafficSecret, true, h.version), + ) + if h.logger.Debug() { + h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } + case tls.QUICEncryptionLevelApplication: + h.aead.SetReadKey(suite, trafficSecret) + h.has1RTTOpener = true + if h.logger.Debug() { + h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } + default: + panic("unexpected read encryption level") + } + h.events = append(h.events, Event{Kind: EventReceivedReadKeys}) + if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil { + h.tracer.UpdatedKeyFromTLS(qtls.FromTLSEncryptionLevel(el), h.perspective.Opposite()) + } +} + +func (h *cryptoSetup) setWriteKey(el tls.QUICEncryptionLevel, suiteID uint16, trafficSecret []byte) { + suite := getCipherSuite(suiteID) + //nolint:exhaustive // The TLS stack doesn't export Initial keys. + switch el { + case tls.QUICEncryptionLevelEarly: + if h.perspective == protocol.PerspectiveServer { + panic("Received 0-RTT write key for the server") + } + h.zeroRTTSealer = newLongHeaderSealer( + createAEAD(suite, trafficSecret, h.version), + newHeaderProtector(suite, trafficSecret, true, h.version), + ) + if h.logger.Debug() { + h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } + if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil { + h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective) + } + // don't set used0RTT here. 0-RTT might still get rejected. + return + case tls.QUICEncryptionLevelHandshake: + h.handshakeSealer = newLongHeaderSealer( + createAEAD(suite, trafficSecret, h.version), + newHeaderProtector(suite, trafficSecret, true, h.version), + ) + if h.logger.Debug() { + h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } + case tls.QUICEncryptionLevelApplication: + h.aead.SetWriteKey(suite, trafficSecret) + h.has1RTTSealer = true + if h.logger.Debug() { + h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } + if h.zeroRTTSealer != nil { + // Once we receive handshake keys, we know that 0-RTT was not rejected. 
+ h.used0RTT.Store(true) + h.zeroRTTSealer = nil + h.logger.Debugf("Dropping 0-RTT keys.") + if h.tracer != nil && h.tracer.DroppedEncryptionLevel != nil { + h.tracer.DroppedEncryptionLevel(protocol.Encryption0RTT) + } + } + default: + panic("unexpected write encryption level") + } + if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil { + h.tracer.UpdatedKeyFromTLS(qtls.FromTLSEncryptionLevel(el), h.perspective) + } +} + +// writeRecord is called when TLS writes data +func (h *cryptoSetup) writeRecord(encLevel tls.QUICEncryptionLevel, p []byte) { + //nolint:exhaustive // handshake records can only be written for Initial and Handshake. + switch encLevel { + case tls.QUICEncryptionLevelInitial: + h.events = append(h.events, Event{Kind: EventWriteInitialData, Data: p}) + case tls.QUICEncryptionLevelHandshake: + h.events = append(h.events, Event{Kind: EventWriteHandshakeData, Data: p}) + case tls.QUICEncryptionLevelApplication: + panic("unexpected write") + default: + panic(fmt.Sprintf("unexpected write encryption level: %s", encLevel)) + } +} + +func (h *cryptoSetup) DiscardInitialKeys() { + dropped := h.initialOpener != nil + h.initialOpener = nil + h.initialSealer = nil + if dropped { + h.logger.Debugf("Dropping Initial keys.") + } +} + +func (h *cryptoSetup) handshakeComplete() { + h.handshakeCompleteTime = time.Now() + h.events = append(h.events, Event{Kind: EventHandshakeComplete}) +} + +func (h *cryptoSetup) SetHandshakeConfirmed() { + h.aead.SetHandshakeConfirmed() + // drop Handshake keys + var dropped bool + if h.handshakeOpener != nil { + h.handshakeOpener = nil + h.handshakeSealer = nil + dropped = true + } + if dropped { + h.logger.Debugf("Dropping Handshake keys.") + } +} + +func (h *cryptoSetup) GetInitialSealer() (LongHeaderSealer, error) { + if h.initialSealer == nil { + return nil, ErrKeysDropped + } + return h.initialSealer, nil +} + +func (h *cryptoSetup) Get0RTTSealer() (LongHeaderSealer, error) { + if h.zeroRTTSealer == nil { + return nil, ErrKeysDropped + } + return h.zeroRTTSealer, nil +} + +func (h *cryptoSetup) GetHandshakeSealer() (LongHeaderSealer, error) { + if h.handshakeSealer == nil { + if h.initialSealer == nil { + return nil, ErrKeysDropped + } + return nil, ErrKeysNotYetAvailable + } + return h.handshakeSealer, nil +} + +func (h *cryptoSetup) Get1RTTSealer() (ShortHeaderSealer, error) { + if !h.has1RTTSealer { + return nil, ErrKeysNotYetAvailable + } + return h.aead, nil +} + +func (h *cryptoSetup) GetInitialOpener() (LongHeaderOpener, error) { + if h.initialOpener == nil { + return nil, ErrKeysDropped + } + return h.initialOpener, nil +} + +func (h *cryptoSetup) Get0RTTOpener() (LongHeaderOpener, error) { + if h.zeroRTTOpener == nil { + if h.initialOpener != nil { + return nil, ErrKeysNotYetAvailable + } + // if the initial opener is also not available, the keys were already dropped + return nil, ErrKeysDropped + } + return h.zeroRTTOpener, nil +} + +func (h *cryptoSetup) GetHandshakeOpener() (LongHeaderOpener, error) { + if h.handshakeOpener == nil { + if h.initialOpener != nil { + return nil, ErrKeysNotYetAvailable + } + // if the initial opener is also not available, the keys were already dropped + return nil, ErrKeysDropped + } + return h.handshakeOpener, nil +} + +func (h *cryptoSetup) Get1RTTOpener() (ShortHeaderOpener, error) { + if h.zeroRTTOpener != nil && time.Since(h.handshakeCompleteTime) > 3*h.rttStats.PTO(true) { + h.zeroRTTOpener = nil + h.logger.Debugf("Dropping 0-RTT keys.") + if h.tracer != nil && h.tracer.DroppedEncryptionLevel != nil 
{ + h.tracer.DroppedEncryptionLevel(protocol.Encryption0RTT) + } + } + + if !h.has1RTTOpener { + return nil, ErrKeysNotYetAvailable + } + return h.aead, nil +} + +func (h *cryptoSetup) ConnectionState() ConnectionState { + return ConnectionState{ + ConnectionState: h.conn.ConnectionState(), + Used0RTT: h.used0RTT.Load(), + } +} + +func wrapError(err error) error { + if alertErr := tls.AlertError(0); errors.As(err, &alertErr) { + return qerr.NewLocalCryptoError(uint8(alertErr), err) + } + return &qerr.TransportError{ErrorCode: qerr.InternalError, ErrorMessage: err.Error()} +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go new file mode 100644 index 00000000..2c5ee42f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go @@ -0,0 +1,134 @@ +package handshake + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/tls" + "encoding/binary" + "fmt" + + "golang.org/x/crypto/chacha20" + + "github.com/quic-go/quic-go/internal/protocol" +) + +type headerProtector interface { + EncryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) + DecryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) +} + +func hkdfHeaderProtectionLabel(v protocol.Version) string { + if v == protocol.Version2 { + return "quicv2 hp" + } + return "quic hp" +} + +func newHeaderProtector(suite *cipherSuite, trafficSecret []byte, isLongHeader bool, v protocol.Version) headerProtector { + hkdfLabel := hkdfHeaderProtectionLabel(v) + switch suite.ID { + case tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384: + return newAESHeaderProtector(suite, trafficSecret, isLongHeader, hkdfLabel) + case tls.TLS_CHACHA20_POLY1305_SHA256: + return newChaChaHeaderProtector(suite, trafficSecret, isLongHeader, hkdfLabel) + default: + panic(fmt.Sprintf("Invalid cipher suite id: %d", suite.ID)) + } +} + +type aesHeaderProtector struct { + mask [16]byte // AES always has a 16 byte block size + block cipher.Block + isLongHeader bool +} + +var _ headerProtector = &aesHeaderProtector{} + +func newAESHeaderProtector(suite *cipherSuite, trafficSecret []byte, isLongHeader bool, hkdfLabel string) headerProtector { + hpKey := hkdfExpandLabel(suite.Hash, trafficSecret, []byte{}, hkdfLabel, suite.KeyLen) + block, err := aes.NewCipher(hpKey) + if err != nil { + panic(fmt.Sprintf("error creating new AES cipher: %s", err)) + } + return &aesHeaderProtector{ + block: block, + isLongHeader: isLongHeader, + } +} + +func (p *aesHeaderProtector) DecryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) { + p.apply(sample, firstByte, hdrBytes) +} + +func (p *aesHeaderProtector) EncryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) { + p.apply(sample, firstByte, hdrBytes) +} + +func (p *aesHeaderProtector) apply(sample []byte, firstByte *byte, hdrBytes []byte) { + if len(sample) != len(p.mask) { + panic("invalid sample size") + } + p.block.Encrypt(p.mask[:], sample) + if p.isLongHeader { + *firstByte ^= p.mask[0] & 0xf + } else { + *firstByte ^= p.mask[0] & 0x1f + } + for i := range hdrBytes { + hdrBytes[i] ^= p.mask[i+1] + } +} + +type chachaHeaderProtector struct { + mask [5]byte + + key [32]byte + isLongHeader bool +} + +var _ headerProtector = &chachaHeaderProtector{} + +func newChaChaHeaderProtector(suite *cipherSuite, trafficSecret []byte, isLongHeader bool, hkdfLabel string) headerProtector { + hpKey := hkdfExpandLabel(suite.Hash, trafficSecret, []byte{}, hkdfLabel, suite.KeyLen) + + p 
:= &chachaHeaderProtector{ + isLongHeader: isLongHeader, + } + copy(p.key[:], hpKey) + return p +} + +func (p *chachaHeaderProtector) DecryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) { + p.apply(sample, firstByte, hdrBytes) +} + +func (p *chachaHeaderProtector) EncryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) { + p.apply(sample, firstByte, hdrBytes) +} + +func (p *chachaHeaderProtector) apply(sample []byte, firstByte *byte, hdrBytes []byte) { + if len(sample) != 16 { + panic("invalid sample size") + } + for i := 0; i < 5; i++ { + p.mask[i] = 0 + } + cipher, err := chacha20.NewUnauthenticatedCipher(p.key[:], sample[4:]) + if err != nil { + panic(err) + } + cipher.SetCounter(binary.LittleEndian.Uint32(sample[:4])) + cipher.XORKeyStream(p.mask[:], p.mask[:]) + p.applyMask(firstByte, hdrBytes) +} + +func (p *chachaHeaderProtector) applyMask(firstByte *byte, hdrBytes []byte) { + if p.isLongHeader { + *firstByte ^= p.mask[0] & 0xf + } else { + *firstByte ^= p.mask[0] & 0x1f + } + for i := range hdrBytes { + hdrBytes[i] ^= p.mask[i+1] + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go new file mode 100644 index 00000000..0caf1c8e --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go @@ -0,0 +1,29 @@ +package handshake + +import ( + "crypto" + "encoding/binary" + + "golang.org/x/crypto/hkdf" +) + +// hkdfExpandLabel HKDF expands a label as defined in RFC 8446, section 7.1. +// Since this implementation avoids using a cryptobyte.Builder, it is about 15% faster than the +// hkdfExpandLabel in the standard library. +func hkdfExpandLabel(hash crypto.Hash, secret, context []byte, label string, length int) []byte { + b := make([]byte, 3, 3+6+len(label)+1+len(context)) + binary.BigEndian.PutUint16(b, uint16(length)) + b[2] = uint8(6 + len(label)) + b = append(b, []byte("tls13 ")...) + b = append(b, []byte(label)...) + b = b[:3+6+len(label)+1] + b[3+6+len(label)] = uint8(len(context)) + b = append(b, context...) + + out := make([]byte, length) + n, err := hkdf.Expand(hash.New, secret, b).Read(out) + if err != nil || n != length { + panic("quic: HKDF-Expand-Label invocation failed unexpectedly") + } + return out +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go new file mode 100644 index 00000000..b8aa7e3e --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go @@ -0,0 +1,71 @@ +package handshake + +import ( + "crypto" + "crypto/tls" + + "golang.org/x/crypto/hkdf" + + "github.com/quic-go/quic-go/internal/protocol" +) + +var ( + quicSaltV1 = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a} + quicSaltV2 = []byte{0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9} +) + +const ( + hkdfLabelKeyV1 = "quic key" + hkdfLabelKeyV2 = "quicv2 key" + hkdfLabelIVV1 = "quic iv" + hkdfLabelIVV2 = "quicv2 iv" +) + +func getSalt(v protocol.Version) []byte { + if v == protocol.Version2 { + return quicSaltV2 + } + return quicSaltV1 +} + +var initialSuite = getCipherSuite(tls.TLS_AES_128_GCM_SHA256) + +// NewInitialAEAD creates a new AEAD for Initial encryption / decryption. 
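+// Per RFC 9001, Section 5.2, both endpoints derive these keys from the
+// client's Destination Connection ID and a version-specific salt, so Initial
+// packets can be protected before any handshake secrets exist.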
+func NewInitialAEAD(connID protocol.ConnectionID, pers protocol.Perspective, v protocol.Version) (LongHeaderSealer, LongHeaderOpener) { + clientSecret, serverSecret := computeSecrets(connID, v) + var mySecret, otherSecret []byte + if pers == protocol.PerspectiveClient { + mySecret = clientSecret + otherSecret = serverSecret + } else { + mySecret = serverSecret + otherSecret = clientSecret + } + myKey, myIV := computeInitialKeyAndIV(mySecret, v) + otherKey, otherIV := computeInitialKeyAndIV(otherSecret, v) + + encrypter := initialSuite.AEAD(myKey, myIV) + decrypter := initialSuite.AEAD(otherKey, otherIV) + + return newLongHeaderSealer(encrypter, newHeaderProtector(initialSuite, mySecret, true, v)), + newLongHeaderOpener(decrypter, newAESHeaderProtector(initialSuite, otherSecret, true, hkdfHeaderProtectionLabel(v))) +} + +func computeSecrets(connID protocol.ConnectionID, v protocol.Version) (clientSecret, serverSecret []byte) { + initialSecret := hkdf.Extract(crypto.SHA256.New, connID.Bytes(), getSalt(v)) + clientSecret = hkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "client in", crypto.SHA256.Size()) + serverSecret = hkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "server in", crypto.SHA256.Size()) + return +} + +func computeInitialKeyAndIV(secret []byte, v protocol.Version) (key, iv []byte) { + keyLabel := hkdfLabelKeyV1 + ivLabel := hkdfLabelIVV1 + if v == protocol.Version2 { + keyLabel = hkdfLabelKeyV2 + ivLabel = hkdfLabelIVV2 + } + key = hkdfExpandLabel(crypto.SHA256, secret, []byte{}, keyLabel, 16) + iv = hkdfExpandLabel(crypto.SHA256, secret, []byte{}, ivLabel, 12) + return +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go new file mode 100644 index 00000000..6e0eec77 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go @@ -0,0 +1,117 @@ +package handshake + +import ( + "context" + "crypto/tls" + "errors" + "io" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +var ( + // ErrKeysNotYetAvailable is returned when an opener or a sealer is requested for an encryption level, + // but the corresponding opener has not yet been initialized + // This can happen when packets arrive out of order. + ErrKeysNotYetAvailable = errors.New("CryptoSetup: keys at this encryption level not yet available") + // ErrKeysDropped is returned when an opener or a sealer is requested for an encryption level, + // but the corresponding keys have already been dropped. + ErrKeysDropped = errors.New("CryptoSetup: keys were already dropped") + // ErrDecryptionFailed is returned when the AEAD fails to open the packet. 
+	ErrDecryptionFailed = errors.New("decryption failed")
+)
+
+type headerDecryptor interface {
+	DecryptHeader(sample []byte, firstByte *byte, pnBytes []byte)
+}
+
+// LongHeaderOpener opens a long header packet
+type LongHeaderOpener interface {
+	headerDecryptor
+	DecodePacketNumber(wirePN protocol.PacketNumber, wirePNLen protocol.PacketNumberLen) protocol.PacketNumber
+	Open(dst, src []byte, pn protocol.PacketNumber, associatedData []byte) ([]byte, error)
+}
+
+// ShortHeaderOpener opens a short header packet
+type ShortHeaderOpener interface {
+	headerDecryptor
+	DecodePacketNumber(wirePN protocol.PacketNumber, wirePNLen protocol.PacketNumberLen) protocol.PacketNumber
+	Open(dst, src []byte, rcvTime time.Time, pn protocol.PacketNumber, kp protocol.KeyPhaseBit, associatedData []byte) ([]byte, error)
+}
+
+// LongHeaderSealer seals a long header packet
+type LongHeaderSealer interface {
+	Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte
+	EncryptHeader(sample []byte, firstByte *byte, pnBytes []byte)
+	Overhead() int
+}
+
+// ShortHeaderSealer seals a short header packet
+type ShortHeaderSealer interface {
+	LongHeaderSealer
+	KeyPhase() protocol.KeyPhaseBit
+}
+
+type ConnectionState struct {
+	tls.ConnectionState
+	Used0RTT bool
+}
+
+// EventKind is the kind of handshake event.
+type EventKind uint8
+
+const (
+	// EventNoEvent signals that there are no new handshake events
+	EventNoEvent EventKind = iota + 1
+	// EventWriteInitialData contains new CRYPTO data to send at the Initial encryption level
+	EventWriteInitialData
+	// EventWriteHandshakeData contains new CRYPTO data to send at the Handshake encryption level
+	EventWriteHandshakeData
+	// EventReceivedReadKeys signals that new decryption keys are available.
+	// It doesn't say which encryption level those keys are for.
+	EventReceivedReadKeys
+	// EventDiscard0RTTKeys signals that the 0-RTT keys were discarded.
+	EventDiscard0RTTKeys
+	// EventReceivedTransportParameters contains the transport parameters sent by the peer.
+	EventReceivedTransportParameters
+	// EventRestoredTransportParameters contains the transport parameters restored from the session ticket.
+	// It is only used for the client.
+	EventRestoredTransportParameters
+	// EventHandshakeComplete signals that the TLS handshake was completed.
+	EventHandshakeComplete
+)
+
+// Event is a handshake event.
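+// Data is only set for the EventWriteInitialData and EventWriteHandshakeData
+// events, and TransportParameters only for the two transport parameter events.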
+type Event struct { + Kind EventKind + Data []byte + TransportParameters *wire.TransportParameters +} + +// CryptoSetup handles the handshake and protecting / unprotecting packets +type CryptoSetup interface { + StartHandshake(context.Context) error + io.Closer + ChangeConnectionID(protocol.ConnectionID) + GetSessionTicket() ([]byte, error) + + HandleMessage([]byte, protocol.EncryptionLevel) error + NextEvent() Event + + SetLargest1RTTAcked(protocol.PacketNumber) error + DiscardInitialKeys() + SetHandshakeConfirmed() + ConnectionState() ConnectionState + + GetInitialOpener() (LongHeaderOpener, error) + GetHandshakeOpener() (LongHeaderOpener, error) + Get0RTTOpener() (LongHeaderOpener, error) + Get1RTTOpener() (ShortHeaderOpener, error) + + GetInitialSealer() (LongHeaderSealer, error) + GetHandshakeSealer() (LongHeaderSealer, error) + Get0RTTSealer() (LongHeaderSealer, error) + Get1RTTSealer() (ShortHeaderSealer, error) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go new file mode 100644 index 00000000..30643cdf --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go @@ -0,0 +1,63 @@ +package handshake + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "fmt" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" +) + +var ( + retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000) + retryAEADv2 cipher.AEAD // used for QUIC v2 (RFC 9369) +) + +func init() { + retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) + retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) +} + +func initAEAD(key [16]byte) cipher.AEAD { + aes, err := aes.NewCipher(key[:]) + if err != nil { + panic(err) + } + aead, err := cipher.NewGCM(aes) + if err != nil { + panic(err) + } + return aead +} + +var ( + retryBuf bytes.Buffer + retryMutex sync.Mutex + retryNonceV1 = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb} + retryNonceV2 = [12]byte{0xd8, 0x69, 0x69, 0xbc, 0x2d, 0x7c, 0x6d, 0x99, 0x90, 0xef, 0xb0, 0x4a} +) + +// GetRetryIntegrityTag calculates the integrity tag on a Retry packet +func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.Version) *[16]byte { + retryMutex.Lock() + defer retryMutex.Unlock() + + retryBuf.WriteByte(uint8(origDestConnID.Len())) + retryBuf.Write(origDestConnID.Bytes()) + retryBuf.Write(retry) + defer retryBuf.Reset() + + var tag [16]byte + var sealed []byte + if version == protocol.Version2 { + sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes()) + } else { + sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes()) + } + if len(sealed) != 16 { + panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed))) + } + return &tag +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go new file mode 100644 index 00000000..b67f0101 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go @@ -0,0 +1,54 @@ +package handshake + +import ( + "errors" + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/quicvarint" +) + +const sessionTicketRevision = 4 + +type sessionTicket struct { + Parameters *wire.TransportParameters + 
RTT time.Duration // to be encoded in microseconds
+}
+
+func (t *sessionTicket) Marshal() []byte {
+	b := make([]byte, 0, 256)
+	b = quicvarint.Append(b, sessionTicketRevision)
+	b = quicvarint.Append(b, uint64(t.RTT.Microseconds()))
+	if t.Parameters == nil {
+		return b
+	}
+	return t.Parameters.MarshalForSessionTicket(b)
+}
+
+func (t *sessionTicket) Unmarshal(b []byte, using0RTT bool) error {
+	rev, l, err := quicvarint.Parse(b)
+	if err != nil {
+		return errors.New("failed to read session ticket revision")
+	}
+	b = b[l:]
+	if rev != sessionTicketRevision {
+		return fmt.Errorf("unknown session ticket revision: %d", rev)
+	}
+	rtt, l, err := quicvarint.Parse(b)
+	if err != nil {
+		return errors.New("failed to read RTT")
+	}
+	b = b[l:]
+	if using0RTT {
+		var tp wire.TransportParameters
+		if err := tp.UnmarshalFromSessionTicket(b); err != nil {
+			return fmt.Errorf("unmarshaling transport parameters from session ticket failed: %s", err.Error())
+		}
+		t.Parameters = &tp
+	} else if len(b) > 0 {
+		return fmt.Errorf("the session ticket has more bytes than expected")
+	}
+	t.RTT = time.Duration(rtt) * time.Microsecond
+	return nil
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go
new file mode 100644
index 00000000..84e58cfc
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go
@@ -0,0 +1,120 @@
+package handshake
+
+import (
+	"bytes"
+	"encoding/asn1"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+)
+
+const (
+	tokenPrefixIP byte = iota
+	tokenPrefixString
+)
+
+// A Token is derived from the client address and can be used to verify the ownership of this address.
+type Token struct {
+	IsRetryToken      bool
+	SentTime          time.Time
+	encodedRemoteAddr []byte
+	// only set for retry tokens
+	OriginalDestConnectionID protocol.ConnectionID
+	RetrySrcConnectionID     protocol.ConnectionID
+}
+
+// ValidateRemoteAddr validates the address, but does not check expiration
+func (t *Token) ValidateRemoteAddr(addr net.Addr) bool {
+	return bytes.Equal(encodeRemoteAddr(addr), t.encodedRemoteAddr)
+}
+
+// token is the struct that is used for ASN1 serialization and deserialization
+type token struct {
+	IsRetryToken             bool
+	RemoteAddr               []byte
+	Timestamp                int64
+	OriginalDestConnectionID []byte
+	RetrySrcConnectionID     []byte
+}
+
+// A TokenGenerator generates tokens
+type TokenGenerator struct {
+	tokenProtector tokenProtector
+}
+
+// NewTokenGenerator initializes a new TokenGenerator
+func NewTokenGenerator(key TokenProtectorKey) *TokenGenerator {
+	return &TokenGenerator{tokenProtector: *newTokenProtector(key)}
+}
+
+// NewRetryToken generates a new token for a Retry for a given source address
+func (g *TokenGenerator) NewRetryToken(
+	raddr net.Addr,
+	origDestConnID protocol.ConnectionID,
+	retrySrcConnID protocol.ConnectionID,
+) ([]byte, error) {
+	data, err := asn1.Marshal(token{
+		IsRetryToken:             true,
+		RemoteAddr:               encodeRemoteAddr(raddr),
+		OriginalDestConnectionID: origDestConnID.Bytes(),
+		RetrySrcConnectionID:     retrySrcConnID.Bytes(),
+		Timestamp:                time.Now().UnixNano(),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return g.tokenProtector.NewToken(data)
+}
+
+// NewToken generates a new token to be sent in a NEW_TOKEN frame
+func (g *TokenGenerator) NewToken(raddr net.Addr) ([]byte, error) {
+	data, err := asn1.Marshal(token{
+		RemoteAddr: encodeRemoteAddr(raddr),
+		Timestamp:  time.Now().UnixNano(),
+	})
+	if err != nil {
+		return nil,
err + } + return g.tokenProtector.NewToken(data) +} + +// DecodeToken decodes a token +func (g *TokenGenerator) DecodeToken(encrypted []byte) (*Token, error) { + // if the client didn't send any token, DecodeToken will be called with a nil-slice + if len(encrypted) == 0 { + return nil, nil + } + + data, err := g.tokenProtector.DecodeToken(encrypted) + if err != nil { + return nil, err + } + t := &token{} + rest, err := asn1.Unmarshal(data, t) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, fmt.Errorf("rest when unpacking token: %d", len(rest)) + } + token := &Token{ + IsRetryToken: t.IsRetryToken, + SentTime: time.Unix(0, t.Timestamp), + encodedRemoteAddr: t.RemoteAddr, + } + if t.IsRetryToken { + token.OriginalDestConnectionID = protocol.ParseConnectionID(t.OriginalDestConnectionID) + token.RetrySrcConnectionID = protocol.ParseConnectionID(t.RetrySrcConnectionID) + } + return token, nil +} + +// encodeRemoteAddr encodes a remote address such that it can be saved in the token +func encodeRemoteAddr(remoteAddr net.Addr) []byte { + if udpAddr, ok := remoteAddr.(*net.UDPAddr); ok { + return append([]byte{tokenPrefixIP}, udpAddr.IP...) + } + return append([]byte{tokenPrefixString}, []byte(remoteAddr.String())...) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go new file mode 100644 index 00000000..15779189 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go @@ -0,0 +1,74 @@ +package handshake + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "fmt" + "io" + + "golang.org/x/crypto/hkdf" +) + +// TokenProtectorKey is the key used to encrypt both Retry and session resumption tokens. +type TokenProtectorKey [32]byte + +const tokenNonceSize = 32 + +// tokenProtector is used to create and verify a token +type tokenProtector struct { + key TokenProtectorKey +} + +// newTokenProtector creates a source for source address tokens +func newTokenProtector(key TokenProtectorKey) *tokenProtector { + return &tokenProtector{key: key} +} + +// NewToken encodes data into a new token. +func (s *tokenProtector) NewToken(data []byte) ([]byte, error) { + var nonce [tokenNonceSize]byte + if _, err := rand.Read(nonce[:]); err != nil { + return nil, err + } + aead, aeadNonce, err := s.createAEAD(nonce[:]) + if err != nil { + return nil, err + } + return append(nonce[:], aead.Seal(nil, aeadNonce, data, nil)...), nil +} + +// DecodeToken decodes a token. 
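+// The expected layout, mirroring NewToken above, is a 32-byte random nonce
+// followed by the AEAD-sealed token data.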
+func (s *tokenProtector) DecodeToken(p []byte) ([]byte, error) { + if len(p) < tokenNonceSize { + return nil, fmt.Errorf("token too short: %d", len(p)) + } + nonce := p[:tokenNonceSize] + aead, aeadNonce, err := s.createAEAD(nonce) + if err != nil { + return nil, err + } + return aead.Open(nil, aeadNonce, p[tokenNonceSize:], nil) +} + +func (s *tokenProtector) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) { + h := hkdf.New(sha256.New, s.key[:], nonce, []byte("quic-go token source")) + key := make([]byte, 32) // use a 32 byte key, in order to select AES-256 + if _, err := io.ReadFull(h, key); err != nil { + return nil, nil, err + } + aeadNonce := make([]byte, 12) + if _, err := io.ReadFull(h, aeadNonce); err != nil { + return nil, nil, err + } + c, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, nil, err + } + return aead, aeadNonce, nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go new file mode 100644 index 00000000..ceaa8047 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go @@ -0,0 +1,332 @@ +package handshake + +import ( + "crypto" + "crypto/cipher" + "crypto/tls" + "encoding/binary" + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" +) + +// KeyUpdateInterval is the maximum number of packets we send or receive before initiating a key update. +// It's a package-level variable to allow modifying it for testing purposes. +var KeyUpdateInterval uint64 = protocol.KeyUpdateInterval + +// FirstKeyUpdateInterval is the maximum number of packets we send or receive before initiating the first key update. +// It's a package-level variable to allow modifying it for testing purposes. +var FirstKeyUpdateInterval uint64 = 100 + +type updatableAEAD struct { + suite *cipherSuite + + keyPhase protocol.KeyPhase + largestAcked protocol.PacketNumber + firstPacketNumber protocol.PacketNumber + handshakeConfirmed bool + + invalidPacketLimit uint64 + invalidPacketCount uint64 + + // Time when the keys should be dropped. Keys are dropped on the next call to Open(). + prevRcvAEADExpiry time.Time + prevRcvAEAD cipher.AEAD + + firstRcvdWithCurrentKey protocol.PacketNumber + firstSentWithCurrentKey protocol.PacketNumber + highestRcvdPN protocol.PacketNumber // highest packet number received (which could be successfully unprotected) + numRcvdWithCurrentKey uint64 + numSentWithCurrentKey uint64 + rcvAEAD cipher.AEAD + sendAEAD cipher.AEAD + // caches cipher.AEAD.Overhead(). This speeds up calls to Overhead(). 
+ aeadOverhead int + + nextRcvAEAD cipher.AEAD + nextSendAEAD cipher.AEAD + nextRcvTrafficSecret []byte + nextSendTrafficSecret []byte + + headerDecrypter headerProtector + headerEncrypter headerProtector + + rttStats *utils.RTTStats + + tracer *logging.ConnectionTracer + logger utils.Logger + version protocol.Version + + // use a single slice to avoid allocations + nonceBuf []byte +} + +var ( + _ ShortHeaderOpener = &updatableAEAD{} + _ ShortHeaderSealer = &updatableAEAD{} +) + +func newUpdatableAEAD(rttStats *utils.RTTStats, tracer *logging.ConnectionTracer, logger utils.Logger, version protocol.Version) *updatableAEAD { + return &updatableAEAD{ + firstPacketNumber: protocol.InvalidPacketNumber, + largestAcked: protocol.InvalidPacketNumber, + firstRcvdWithCurrentKey: protocol.InvalidPacketNumber, + firstSentWithCurrentKey: protocol.InvalidPacketNumber, + rttStats: rttStats, + tracer: tracer, + logger: logger, + version: version, + } +} + +func (a *updatableAEAD) rollKeys() { + if a.prevRcvAEAD != nil { + a.logger.Debugf("Dropping key phase %d ahead of scheduled time. Drop time was: %s", a.keyPhase-1, a.prevRcvAEADExpiry) + if a.tracer != nil && a.tracer.DroppedKey != nil { + a.tracer.DroppedKey(a.keyPhase - 1) + } + a.prevRcvAEADExpiry = time.Time{} + } + + a.keyPhase++ + a.firstRcvdWithCurrentKey = protocol.InvalidPacketNumber + a.firstSentWithCurrentKey = protocol.InvalidPacketNumber + a.numRcvdWithCurrentKey = 0 + a.numSentWithCurrentKey = 0 + a.prevRcvAEAD = a.rcvAEAD + a.rcvAEAD = a.nextRcvAEAD + a.sendAEAD = a.nextSendAEAD + + a.nextRcvTrafficSecret = a.getNextTrafficSecret(a.suite.Hash, a.nextRcvTrafficSecret) + a.nextSendTrafficSecret = a.getNextTrafficSecret(a.suite.Hash, a.nextSendTrafficSecret) + a.nextRcvAEAD = createAEAD(a.suite, a.nextRcvTrafficSecret, a.version) + a.nextSendAEAD = createAEAD(a.suite, a.nextSendTrafficSecret, a.version) +} + +func (a *updatableAEAD) startKeyDropTimer(now time.Time) { + d := 3 * a.rttStats.PTO(true) + a.logger.Debugf("Starting key drop timer to drop key phase %d (in %s)", a.keyPhase-1, d) + a.prevRcvAEADExpiry = now.Add(d) +} + +func (a *updatableAEAD) getNextTrafficSecret(hash crypto.Hash, ts []byte) []byte { + return hkdfExpandLabel(hash, ts, []byte{}, "quic ku", hash.Size()) +} + +// SetReadKey sets the read key. +// For the client, this function is called before SetWriteKey. +// For the server, this function is called after SetWriteKey. +func (a *updatableAEAD) SetReadKey(suite *cipherSuite, trafficSecret []byte) { + a.rcvAEAD = createAEAD(suite, trafficSecret, a.version) + a.headerDecrypter = newHeaderProtector(suite, trafficSecret, false, a.version) + if a.suite == nil { + a.setAEADParameters(a.rcvAEAD, suite) + } + + a.nextRcvTrafficSecret = a.getNextTrafficSecret(suite.Hash, trafficSecret) + a.nextRcvAEAD = createAEAD(suite, a.nextRcvTrafficSecret, a.version) +} + +// SetWriteKey sets the write key. +// For the client, this function is called after SetReadKey. +// For the server, this function is called before SetReadKey. 
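+// The traffic secret for the next key phase is derived immediately (HKDF label
+// "quic ku"), so the keys for a future key update are always ready in advance.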
+func (a *updatableAEAD) SetWriteKey(suite *cipherSuite, trafficSecret []byte) {
+	a.sendAEAD = createAEAD(suite, trafficSecret, a.version)
+	a.headerEncrypter = newHeaderProtector(suite, trafficSecret, false, a.version)
+	if a.suite == nil {
+		a.setAEADParameters(a.sendAEAD, suite)
+	}
+
+	a.nextSendTrafficSecret = a.getNextTrafficSecret(suite.Hash, trafficSecret)
+	a.nextSendAEAD = createAEAD(suite, a.nextSendTrafficSecret, a.version)
+}
+
+func (a *updatableAEAD) setAEADParameters(aead cipher.AEAD, suite *cipherSuite) {
+	a.nonceBuf = make([]byte, aead.NonceSize())
+	a.aeadOverhead = aead.Overhead()
+	a.suite = suite
+	switch suite.ID {
+	case tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384:
+		a.invalidPacketLimit = protocol.InvalidPacketLimitAES
+	case tls.TLS_CHACHA20_POLY1305_SHA256:
+		a.invalidPacketLimit = protocol.InvalidPacketLimitChaCha
+	default:
+		panic(fmt.Sprintf("unknown cipher suite %d", suite.ID))
+	}
+}
+
+func (a *updatableAEAD) DecodePacketNumber(wirePN protocol.PacketNumber, wirePNLen protocol.PacketNumberLen) protocol.PacketNumber {
+	return protocol.DecodePacketNumber(wirePNLen, a.highestRcvdPN, wirePN)
+}
+
+func (a *updatableAEAD) Open(dst, src []byte, rcvTime time.Time, pn protocol.PacketNumber, kp protocol.KeyPhaseBit, ad []byte) ([]byte, error) {
+	dec, err := a.open(dst, src, rcvTime, pn, kp, ad)
+	if err == ErrDecryptionFailed {
+		a.invalidPacketCount++
+		if a.invalidPacketCount >= a.invalidPacketLimit {
+			return nil, &qerr.TransportError{ErrorCode: qerr.AEADLimitReached}
+		}
+	}
+	if err == nil {
+		a.highestRcvdPN = max(a.highestRcvdPN, pn)
+	}
+	return dec, err
+}
+
+func (a *updatableAEAD) open(dst, src []byte, rcvTime time.Time, pn protocol.PacketNumber, kp protocol.KeyPhaseBit, ad []byte) ([]byte, error) {
+	if a.prevRcvAEAD != nil && !a.prevRcvAEADExpiry.IsZero() && rcvTime.After(a.prevRcvAEADExpiry) {
+		a.prevRcvAEAD = nil
+		a.logger.Debugf("Dropping key phase %d", a.keyPhase-1)
+		a.prevRcvAEADExpiry = time.Time{}
+		if a.tracer != nil && a.tracer.DroppedKey != nil {
+			a.tracer.DroppedKey(a.keyPhase - 1)
+		}
+	}
+	binary.BigEndian.PutUint64(a.nonceBuf[len(a.nonceBuf)-8:], uint64(pn))
+	if kp != a.keyPhase.Bit() {
+		if a.keyPhase > 0 && a.firstRcvdWithCurrentKey == protocol.InvalidPacketNumber || pn < a.firstRcvdWithCurrentKey {
+			if a.prevRcvAEAD == nil {
+				return nil, ErrKeysDropped
+			}
+			// we updated the key, but the peer hasn't updated yet
+			dec, err := a.prevRcvAEAD.Open(dst, a.nonceBuf, src, ad)
+			if err != nil {
+				err = ErrDecryptionFailed
+			}
+			return dec, err
+		}
+		// try opening the packet with the next key phase
+		dec, err := a.nextRcvAEAD.Open(dst, a.nonceBuf, src, ad)
+		if err != nil {
+			return nil, ErrDecryptionFailed
+		}
+		// Opening succeeded. Check if the peer was allowed to update.
+		if a.keyPhase > 0 && a.firstSentWithCurrentKey == protocol.InvalidPacketNumber {
+			return nil, &qerr.TransportError{
+				ErrorCode:    qerr.KeyUpdateError,
+				ErrorMessage: "keys updated too quickly",
+			}
+		}
+		a.rollKeys()
+		a.logger.Debugf("Peer updated keys to %d", a.keyPhase)
+		// The peer initiated this key update. It's safe to drop the keys for the previous generation now.
+		// Start a timer to drop the previous key generation.
+		a.startKeyDropTimer(rcvTime)
+		if a.tracer != nil && a.tracer.UpdatedKey != nil {
+			a.tracer.UpdatedKey(a.keyPhase, true)
+		}
+		a.firstRcvdWithCurrentKey = pn
+		return dec, err
+	}
+	// The AEAD we're using here will be the qtls.aeadAESGCM13.
+	// It uses the nonce provided here and XORs it with the IV.
+ dec, err := a.rcvAEAD.Open(dst, a.nonceBuf, src, ad)
+ if err != nil {
+ return dec, ErrDecryptionFailed
+ }
+ a.numRcvdWithCurrentKey++
+ if a.firstRcvdWithCurrentKey == protocol.InvalidPacketNumber {
+ // We initiated the key update, and now we received the first packet protected with the new key phase.
+ // Therefore, we are certain that the peer rolled its keys as well. Start a timer to drop the old keys.
+ if a.keyPhase > 0 {
+ a.logger.Debugf("Peer confirmed key update to phase %d", a.keyPhase)
+ a.startKeyDropTimer(rcvTime)
+ }
+ a.firstRcvdWithCurrentKey = pn
+ }
+ return dec, err
+}
+
+func (a *updatableAEAD) Seal(dst, src []byte, pn protocol.PacketNumber, ad []byte) []byte {
+ if a.firstSentWithCurrentKey == protocol.InvalidPacketNumber {
+ a.firstSentWithCurrentKey = pn
+ }
+ if a.firstPacketNumber == protocol.InvalidPacketNumber {
+ a.firstPacketNumber = pn
+ }
+ a.numSentWithCurrentKey++
+ binary.BigEndian.PutUint64(a.nonceBuf[len(a.nonceBuf)-8:], uint64(pn))
+ // The AEAD we're using here will be the qtls.aeadAESGCM13.
+ // It uses the nonce provided here and XORs it with the IV.
+ return a.sendAEAD.Seal(dst, a.nonceBuf, src, ad)
+}
+
+func (a *updatableAEAD) SetLargestAcked(pn protocol.PacketNumber) error {
+ if a.firstSentWithCurrentKey != protocol.InvalidPacketNumber &&
+ pn >= a.firstSentWithCurrentKey && a.numRcvdWithCurrentKey == 0 {
+ return &qerr.TransportError{
+ ErrorCode: qerr.KeyUpdateError,
+ ErrorMessage: fmt.Sprintf("received ACK for key phase %d, but peer didn't update keys", a.keyPhase),
+ }
+ }
+ a.largestAcked = pn
+ return nil
+}
+
+func (a *updatableAEAD) SetHandshakeConfirmed() {
+ a.handshakeConfirmed = true
+}
+
+func (a *updatableAEAD) updateAllowed() bool {
+ if !a.handshakeConfirmed {
+ return false
+ }
+ // the first key update is allowed as soon as the handshake is confirmed
+ return a.keyPhase == 0 ||
+ // subsequent key updates as soon as a packet sent with that key phase has been acknowledged
+ (a.firstSentWithCurrentKey != protocol.InvalidPacketNumber &&
+ a.largestAcked != protocol.InvalidPacketNumber &&
+ a.largestAcked >= a.firstSentWithCurrentKey)
+}
+
+func (a *updatableAEAD) shouldInitiateKeyUpdate() bool {
+ if !a.updateAllowed() {
+ return false
+ }
+ // Initiate the first key update shortly after the handshake, in order to exercise the key update mechanism.
+ if a.keyPhase == 0 {
+ if a.numRcvdWithCurrentKey >= FirstKeyUpdateInterval || a.numSentWithCurrentKey >= FirstKeyUpdateInterval {
+ return true
+ }
+ }
+ if a.numRcvdWithCurrentKey >= KeyUpdateInterval {
+ a.logger.Debugf("Received %d packets with current key phase. Initiating key update to the next key phase: %d", a.numRcvdWithCurrentKey, a.keyPhase+1)
+ return true
+ }
+ if a.numSentWithCurrentKey >= KeyUpdateInterval {
+ a.logger.Debugf("Sent %d packets with current key phase.
Initiating key update to the next key phase: %d", a.numSentWithCurrentKey, a.keyPhase+1) + return true + } + return false +} + +func (a *updatableAEAD) KeyPhase() protocol.KeyPhaseBit { + if a.shouldInitiateKeyUpdate() { + a.rollKeys() + a.logger.Debugf("Initiating key update to key phase %d", a.keyPhase) + if a.tracer != nil && a.tracer.UpdatedKey != nil { + a.tracer.UpdatedKey(a.keyPhase, false) + } + } + return a.keyPhase.Bit() +} + +func (a *updatableAEAD) Overhead() int { + return a.aeadOverhead +} + +func (a *updatableAEAD) EncryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) { + a.headerEncrypter.EncryptHeader(sample, firstByte, hdrBytes) +} + +func (a *updatableAEAD) DecryptHeader(sample []byte, firstByte *byte, hdrBytes []byte) { + a.headerDecrypter.DecryptHeader(sample, firstByte, hdrBytes) +} + +func (a *updatableAEAD) FirstPacketNumber() protocol.PacketNumber { + return a.firstPacketNumber +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go b/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go new file mode 100644 index 00000000..77259b5f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go @@ -0,0 +1,116 @@ +package protocol + +import ( + "crypto/rand" + "errors" + "fmt" + "io" +) + +var ErrInvalidConnectionIDLen = errors.New("invalid Connection ID length") + +// An ArbitraryLenConnectionID is a QUIC Connection ID able to represent Connection IDs according to RFC 8999. +// Future QUIC versions might allow connection ID lengths up to 255 bytes, while QUIC v1 +// restricts the length to 20 bytes. +type ArbitraryLenConnectionID []byte + +func (c ArbitraryLenConnectionID) Len() int { + return len(c) +} + +func (c ArbitraryLenConnectionID) Bytes() []byte { + return c +} + +func (c ArbitraryLenConnectionID) String() string { + if c.Len() == 0 { + return "(empty)" + } + return fmt.Sprintf("%x", c.Bytes()) +} + +const maxConnectionIDLen = 20 + +// A ConnectionID in QUIC +type ConnectionID struct { + b [20]byte + l uint8 +} + +// GenerateConnectionID generates a connection ID using cryptographic random +func GenerateConnectionID(l int) (ConnectionID, error) { + var c ConnectionID + c.l = uint8(l) + _, err := rand.Read(c.b[:l]) + return c, err +} + +// ParseConnectionID interprets b as a Connection ID. +// It panics if b is longer than 20 bytes. +func ParseConnectionID(b []byte) ConnectionID { + if len(b) > maxConnectionIDLen { + panic("invalid conn id length") + } + var c ConnectionID + c.l = uint8(len(b)) + copy(c.b[:c.l], b) + return c +} + +// GenerateConnectionIDForInitial generates a connection ID for the Initial packet. +// It uses a length randomly chosen between 8 and 20 bytes. +func GenerateConnectionIDForInitial() (ConnectionID, error) { + r := make([]byte, 1) + if _, err := rand.Read(r); err != nil { + return ConnectionID{}, err + } + l := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1) + return GenerateConnectionID(l) +} + +// ReadConnectionID reads a connection ID of length len from the given io.Reader. +// It returns io.EOF if there are not enough bytes to read. 
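A short usage sketch of the ConnectionID helpers defined here; ConnectionID is a value type backed by a fixed 20-byte array, so parsing does not allocate. Since protocol is an internal package, this only compiles inside the quic-go module; it is shown as importable purely for illustration.

package main

import (
	"fmt"

	"github.com/quic-go/quic-go/internal/protocol"
)

func main() {
	// Parse a Connection ID from raw bytes (panics if longer than 20 bytes).
	cid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Println(cid.Len(), cid) // 4 deadbeef

	// Generate a random 8-byte Connection ID, the minimum length
	// required on an Initial packet.
	rnd, err := protocol.GenerateConnectionID(8)
	if err != nil {
		panic(err)
	}
	fmt.Println(rnd.Len(), rnd)
}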
+func ReadConnectionID(r io.Reader, l int) (ConnectionID, error) { + var c ConnectionID + if l == 0 { + return c, nil + } + if l > maxConnectionIDLen { + return c, ErrInvalidConnectionIDLen + } + c.l = uint8(l) + _, err := io.ReadFull(r, c.b[:l]) + if err == io.ErrUnexpectedEOF { + return c, io.EOF + } + return c, err +} + +// Len returns the length of the connection ID in bytes +func (c ConnectionID) Len() int { + return int(c.l) +} + +// Bytes returns the byte representation +func (c ConnectionID) Bytes() []byte { + return c.b[:c.l] +} + +func (c ConnectionID) String() string { + if c.Len() == 0 { + return "(empty)" + } + return fmt.Sprintf("%x", c.Bytes()) +} + +type DefaultConnectionIDGenerator struct { + ConnLen int +} + +func (d *DefaultConnectionIDGenerator) GenerateConnectionID() (ConnectionID, error) { + return GenerateConnectionID(d.ConnLen) +} + +func (d *DefaultConnectionIDGenerator) ConnectionIDLen() int { + return d.ConnLen +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go new file mode 100644 index 00000000..32d38ab1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go @@ -0,0 +1,30 @@ +package protocol + +// EncryptionLevel is the encryption level +// Default value is Unencrypted +type EncryptionLevel uint8 + +const ( + // EncryptionInitial is the Initial encryption level + EncryptionInitial EncryptionLevel = 1 + iota + // EncryptionHandshake is the Handshake encryption level + EncryptionHandshake + // Encryption0RTT is the 0-RTT encryption level + Encryption0RTT + // Encryption1RTT is the 1-RTT encryption level + Encryption1RTT +) + +func (e EncryptionLevel) String() string { + switch e { + case EncryptionInitial: + return "Initial" + case EncryptionHandshake: + return "Handshake" + case Encryption0RTT: + return "0-RTT" + case Encryption1RTT: + return "1-RTT" + } + return "unknown" +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go b/vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go new file mode 100644 index 00000000..edd740cf --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go @@ -0,0 +1,36 @@ +package protocol + +// KeyPhase is the key phase +type KeyPhase uint64 + +// Bit determines the key phase bit +func (p KeyPhase) Bit() KeyPhaseBit { + if p%2 == 0 { + return KeyPhaseZero + } + return KeyPhaseOne +} + +// KeyPhaseBit is the key phase bit +type KeyPhaseBit uint8 + +const ( + // KeyPhaseUndefined is an undefined key phase + KeyPhaseUndefined KeyPhaseBit = iota + // KeyPhaseZero is key phase 0 + KeyPhaseZero + // KeyPhaseOne is key phase 1 + KeyPhaseOne +) + +func (p KeyPhaseBit) String() string { + //nolint:exhaustive + switch p { + case KeyPhaseZero: + return "0" + case KeyPhaseOne: + return "1" + default: + return "undefined" + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go b/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go new file mode 100644 index 00000000..9422db92 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go @@ -0,0 +1,57 @@ +package protocol + +// A PacketNumber in QUIC +type PacketNumber int64 + +// InvalidPacketNumber is a packet number that is never sent. +// In QUIC, 0 is a valid packet number. 
+const InvalidPacketNumber PacketNumber = -1
+
+// PacketNumberLen is the length of the packet number in bytes
+type PacketNumberLen uint8
+
+const (
+ // PacketNumberLen1 is a packet number length of 1 byte
+ PacketNumberLen1 PacketNumberLen = 1
+ // PacketNumberLen2 is a packet number length of 2 bytes
+ PacketNumberLen2 PacketNumberLen = 2
+ // PacketNumberLen3 is a packet number length of 3 bytes
+ PacketNumberLen3 PacketNumberLen = 3
+ // PacketNumberLen4 is a packet number length of 4 bytes
+ PacketNumberLen4 PacketNumberLen = 4
+)
+
+// DecodePacketNumber calculates the packet number based on its length and the last seen packet number.
+// This function is taken from https://www.rfc-editor.org/rfc/rfc9000.html#section-a.3.
+func DecodePacketNumber(length PacketNumberLen, largest PacketNumber, truncated PacketNumber) PacketNumber {
+ expected := largest + 1
+ win := PacketNumber(1 << (length * 8))
+ hwin := win / 2
+ mask := win - 1
+ candidate := (expected & ^mask) | truncated
+ if candidate <= expected-hwin && candidate < 1<<62-win {
+ return candidate + win
+ }
+ if candidate > expected+hwin && candidate >= win {
+ return candidate - win
+ }
+ return candidate
+}
+
+// PacketNumberLengthForHeader gets the length of the packet number for the public header.
+// It never chooses a PacketNumberLen of 1 byte, since this is too short under certain circumstances.
+func PacketNumberLengthForHeader(pn, largestAcked PacketNumber) PacketNumberLen {
+ var numUnacked PacketNumber
+ if largestAcked == InvalidPacketNumber {
+ numUnacked = pn + 1
+ } else {
+ numUnacked = pn - largestAcked
+ }
+ if numUnacked < 1<<(16-1) {
+ return PacketNumberLen2
+ }
+ if numUnacked < 1<<(24-1) {
+ return PacketNumberLen3
+ }
+ return PacketNumberLen4
+} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go new file mode 100644 index 00000000..7c4d8d4d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go @@ -0,0 +1,180 @@ +package protocol
+
+import "time"
+
+// DesiredReceiveBufferSize is the kernel UDP receive buffer size that we'd like to use.
+const DesiredReceiveBufferSize = (1 << 20) * 7 // 7 MB
+
+// DesiredSendBufferSize is the kernel UDP send buffer size that we'd like to use.
+const DesiredSendBufferSize = (1 << 20) * 7 // 7 MB
+
+// InitialPacketSize is the initial (before Path MTU discovery) maximum packet size used.
+const InitialPacketSize = 1280
+
+// MaxCongestionWindowPackets is the maximum congestion window in packets.
+const MaxCongestionWindowPackets = 10000
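DecodePacketNumber above can be checked against the worked example in RFC 9000, Appendix A.3: with 0xa82f30ea as the highest received packet number, the 2-byte truncated value 0x9b32 must decode to 0xa82f9b32. Again assuming code inside the quic-go module, since the package is internal:

package main

import (
	"fmt"

	"github.com/quic-go/quic-go/internal/protocol"
)

func main() {
	// Worked example from RFC 9000, Appendix A.3.
	pn := protocol.DecodePacketNumber(protocol.PacketNumberLen2, 0xa82f30ea, 0x9b32)
	fmt.Printf("%#x\n", pn) // 0xa82f9b32
}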
+
+// MaxUndecryptablePackets limits the number of undecryptable packets that are queued in the connection.
+const MaxUndecryptablePackets = 32
+
+// ConnectionFlowControlMultiplier determines how much larger the connection flow control window needs to be relative to any stream's flow control window
+// This is the value that Chromium is using
+const ConnectionFlowControlMultiplier = 1.5
+
+// DefaultInitialMaxStreamData is the default initial stream-level flow control window for receiving data
+const DefaultInitialMaxStreamData = (1 << 10) * 512 // 512 KB
+
+// DefaultInitialMaxData is the connection-level flow control window for receiving data
+const DefaultInitialMaxData = ConnectionFlowControlMultiplier * DefaultInitialMaxStreamData
+
+// DefaultMaxReceiveStreamFlowControlWindow is the default maximum stream-level flow control window for receiving data
+const DefaultMaxReceiveStreamFlowControlWindow = 6 * (1 << 20) // 6 MB
+
+// DefaultMaxReceiveConnectionFlowControlWindow is the default connection-level flow control window for receiving data
+const DefaultMaxReceiveConnectionFlowControlWindow = 15 * (1 << 20) // 15 MB
+
+// WindowUpdateThreshold is the fraction of the receive window that has to be consumed before a higher offset is advertised to the client
+const WindowUpdateThreshold = 0.25
+
+// DefaultMaxIncomingStreams is the maximum number of streams that a peer may open
+const DefaultMaxIncomingStreams = 100
+
+// DefaultMaxIncomingUniStreams is the maximum number of unidirectional streams that a peer may open
+const DefaultMaxIncomingUniStreams = 100
+
+// MaxServerUnprocessedPackets is the max number of packets stored in the server that are not yet processed.
+const MaxServerUnprocessedPackets = 1024
+
+// MaxConnUnprocessedPackets is the max number of packets stored in each connection that are not yet processed.
+const MaxConnUnprocessedPackets = 256
+
+// SkipPacketInitialPeriod is the initial period length used for packet number skipping to prevent an Optimistic ACK attack.
+// Every time a packet number is skipped, the period is doubled, up to SkipPacketMaxPeriod.
+const SkipPacketInitialPeriod PacketNumber = 256
+
+// SkipPacketMaxPeriod is the maximum period length used for packet number skipping.
+const SkipPacketMaxPeriod PacketNumber = 128 * 1024
+
+// MaxAcceptQueueSize is the maximum number of connections that the server queues for accepting.
+// If the queue is full, new connection attempts will be rejected.
+const MaxAcceptQueueSize = 32
+
+// TokenValidity is the duration that a (non-retry) token is considered valid
+const TokenValidity = 24 * time.Hour
+
+// MaxOutstandingSentPackets is the maximum number of packets saved for retransmission.
+// When reached, it imposes a soft limit on sending new packets:
+// Sending ACKs and retransmission is still allowed, but no new regular packets can be sent.
+const MaxOutstandingSentPackets = 2 * MaxCongestionWindowPackets
+
+// MaxTrackedSentPackets is the maximum number of sent packets saved for retransmission.
+// When reached, no more packets will be sent.
+// This value *must* be larger than MaxOutstandingSentPackets.
+const MaxTrackedSentPackets = MaxOutstandingSentPackets * 5 / 4
+
+// MaxNonAckElicitingAcks is the maximum number of packets containing an ACK,
+// but no ack-eliciting frames, that we send in a row
+const MaxNonAckElicitingAcks = 19
+
+// MaxStreamFrameSorterGaps is the maximum number of gaps between received StreamFrames.
+// This prevents DoS attacks against the streamFrameSorter.
+const MaxStreamFrameSorterGaps = 1000
+
+// MinStreamFrameBufferSize is the minimum data length of a received STREAM frame
+// that we use the buffer for. This protects against a DoS where an attacker would send us
+// very small STREAM frames to consume a lot of memory.
+const MinStreamFrameBufferSize = 128
+
+// MinCoalescedPacketSize is the minimum size of a coalesced packet that we pack.
+// If a packet has less than this number of bytes, we won't coalesce any more packets onto it.
+const MinCoalescedPacketSize = 128
+
+// MaxCryptoStreamOffset is the maximum offset allowed on any of the crypto streams.
+// This limits the size of the ClientHello and Certificates that can be received.
+const MaxCryptoStreamOffset = 16 * (1 << 10)
+
+// MinRemoteIdleTimeout is the minimum value that we accept for the remote idle timeout
+const MinRemoteIdleTimeout = 5 * time.Second
+
+// DefaultIdleTimeout is the default idle timeout
+const DefaultIdleTimeout = 30 * time.Second
+
+// DefaultHandshakeIdleTimeout is the default idle timeout used before handshake completion.
+const DefaultHandshakeIdleTimeout = 5 * time.Second
+
+// MaxKeepAliveInterval is the maximum time until we send a packet to keep a connection alive.
+// It should be shorter than the time that NATs clear their mapping.
+const MaxKeepAliveInterval = 20 * time.Second
+
+// RetiredConnectionIDDeleteTimeout is the time we keep closed connections around in order to retransmit the CONNECTION_CLOSE.
+// After this time, all information about the old connection will be deleted.
+const RetiredConnectionIDDeleteTimeout = 5 * time.Second
+
+// MinStreamFrameSize is the minimum size that has to be left in a packet, so that we add another STREAM frame.
+// This avoids splitting up STREAM frames into small pieces, which has 2 advantages:
+// 1. it reduces the framing overhead
+// 2. it reduces the head-of-line blocking, when a packet is lost
+const MinStreamFrameSize ByteCount = 128
+
+// MaxPostHandshakeCryptoFrameSize is the maximum size of CRYPTO frames
+// we send after the handshake completes.
+const MaxPostHandshakeCryptoFrameSize = 1000
+
+// MaxAckFrameSize is the maximum size for an ACK frame that we write.
+// Due to the varint encoding, ACK frames can grow (almost) indefinitely large.
+// The MaxAckFrameSize should be large enough to encode many ACK ranges,
+// but must ensure that a maximum size ACK frame fits into one packet.
+const MaxAckFrameSize ByteCount = 1000
+
+// MaxNumAckRanges is the maximum number of ACK ranges that we send in an ACK frame.
+// It also serves as a limit for the packet history.
+// If at any point we keep track of more ranges, old ranges are discarded.
+const MaxNumAckRanges = 32
+
+// MinPacingDelay is the minimum duration that is used for packet pacing.
+// If the packet pacing frequency is higher, multiple packets might be sent at once.
+// Example: For a packet pacing delay of 200μs, we would send 5 packets at once, wait for 1ms, and so forth.
+const MinPacingDelay = time.Millisecond
+
+// DefaultConnectionIDLength is the connection ID length that is used for multiplexed connections
+// if no other value is configured.
+const DefaultConnectionIDLength = 4
+
+// MaxActiveConnectionIDs is the number of connection IDs that we're storing.
+const MaxActiveConnectionIDs = 4
+
+// MaxIssuedConnectionIDs is the maximum number of connection IDs that we're issuing at the same time.
+const MaxIssuedConnectionIDs = 6
+
+// PacketsPerConnectionID is the number of packets we send using one connection ID.
+// If the peer provides us with enough new connection IDs, we switch to a new connection ID.
+const PacketsPerConnectionID = 10000
+
+// AckDelayExponent is the ack delay exponent used when sending ACKs.
+const AckDelayExponent = 3
+
+// TimerGranularity is the estimated timer granularity.
+// The loss detection timer will not be set to a value smaller than the granularity.
+const TimerGranularity = time.Millisecond
+
+// MaxAckDelay is the maximum time by which we delay sending ACKs.
+const MaxAckDelay = 25 * time.Millisecond
+
+// MaxAckDelayInclGranularity is the max_ack_delay including the timer granularity.
+// This is the value that should be advertised to the peer.
+const MaxAckDelayInclGranularity = MaxAckDelay + TimerGranularity
+
+// KeyUpdateInterval is the maximum number of packets we send or receive before initiating a key update.
+const KeyUpdateInterval = 100 * 1000
+
+// Max0RTTQueueingDuration is the maximum time that we store 0-RTT packets in order to wait for the corresponding Initial to be received.
+const Max0RTTQueueingDuration = 100 * time.Millisecond
+
+// Max0RTTQueues is the maximum number of connections that we buffer 0-RTT packets for.
+const Max0RTTQueues = 32
+
+// Max0RTTQueueLen is the maximum number of 0-RTT packets that we buffer for each connection.
+// When a new connection is created, all buffered packets are passed to the connection immediately.
+// To avoid blocking, this value has to be smaller than MaxConnUnprocessedPackets.
+// To avoid packets being dropped as undecryptable by the connection, this value has to be smaller than MaxUndecryptablePackets.
+const Max0RTTQueueLen = 31 diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go b/vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go new file mode 100644 index 00000000..5a29d3ce --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go @@ -0,0 +1,26 @@ +package protocol + +// Perspective determines if we're acting as a server or a client +type Perspective int + +// the perspectives +const ( + PerspectiveServer Perspective = 1 + PerspectiveClient Perspective = 2 +) + +// Opposite returns the perspective of the peer +func (p Perspective) Opposite() Perspective { + return 3 - p +} + +func (p Perspective) String() string { + switch p { + case PerspectiveServer: + return "server" + case PerspectiveClient: + return "client" + default: + return "invalid perspective" + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go b/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go new file mode 100644 index 00000000..d056cb9d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go @@ -0,0 +1,152 @@ +package protocol + +import ( + "fmt" + "time" +) + +// The PacketType is the Long Header Type +type PacketType uint8 + +const ( + // PacketTypeInitial is the packet type of an Initial packet + PacketTypeInitial PacketType = 1 + iota + // PacketTypeRetry is the packet type of a Retry packet + PacketTypeRetry + // PacketTypeHandshake is the packet type of a Handshake packet + PacketTypeHandshake + // PacketType0RTT is the packet type of a 0-RTT packet + PacketType0RTT +) + +func (t PacketType) String() string { + switch t { + case PacketTypeInitial: + return "Initial" + case PacketTypeRetry: + return "Retry" + case PacketTypeHandshake: + return "Handshake" + case PacketType0RTT: + return "0-RTT Protected" + default: + return fmt.Sprintf("unknown packet type: %d", t) + } +} + +type ECN uint8 + +const ( + ECNUnsupported ECN = iota + ECNNon // 00 + ECT1 // 01 + ECT0 // 10 + ECNCE // 11 +) + +func ParseECNHeaderBits(bits byte) ECN { + switch bits { + case 0: + return ECNNon + case 0b00000010: + return ECT0 + case 0b00000001: + return ECT1 + case 0b00000011: + return ECNCE + default: + panic("invalid ECN bits") + } +} + +func (e ECN) ToHeaderBits() byte { + //nolint:exhaustive // There are only 4 values. + switch e { + case ECNNon: + return 0 + case ECT0: + return 0b00000010 + case ECT1: + return 0b00000001 + case ECNCE: + return 0b00000011 + default: + panic("ECN unsupported") + } +} + +func (e ECN) String() string { + switch e { + case ECNUnsupported: + return "ECN unsupported" + case ECNNon: + return "Not-ECT" + case ECT1: + return "ECT(1)" + case ECT0: + return "ECT(0)" + case ECNCE: + return "CE" + default: + return fmt.Sprintf("invalid ECN value: %d", e) + } +} + +// A ByteCount in QUIC +type ByteCount int64 + +// MaxByteCount is the maximum value of a ByteCount +const MaxByteCount = ByteCount(1<<62 - 1) + +// InvalidByteCount is an invalid byte count +const InvalidByteCount ByteCount = -1 + +// A StatelessResetToken is a stateless reset token. +type StatelessResetToken [16]byte + +// MaxPacketBufferSize maximum packet size of any QUIC packet, based on +// ethernet's max size, minus the IP and UDP headers. IPv6 has a 40 byte header, +// UDP adds an additional 8 bytes. This is a total overhead of 48 bytes. +// Ethernet's max packet size is 1500 bytes, 1500 - 48 = 1452. 
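The two ECN codepoint helpers above are inverses of each other for the four valid codepoints (ToHeaderBits panics for ECNUnsupported, and ParseECNHeaderBits panics on any other bit pattern). A roundtrip sketch, assuming code inside the quic-go module since the package is internal:

package main

import (
	"fmt"

	"github.com/quic-go/quic-go/internal/protocol"
)

func main() {
	// The two ECN bits live in the low bits of the IP TOS / Traffic Class byte.
	for _, e := range []protocol.ECN{protocol.ECNNon, protocol.ECT1, protocol.ECT0, protocol.ECNCE} {
		bits := e.ToHeaderBits()
		fmt.Printf("%-12s -> %02b -> %s\n", e, bits, protocol.ParseECNHeaderBits(bits))
	}
}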
+const MaxPacketBufferSize = 1452
+
+// MaxLargePacketBufferSize is used when using GSO
+const MaxLargePacketBufferSize = 20 * 1024
+
+// MinInitialPacketSize is the minimum size an Initial packet is required to have.
+const MinInitialPacketSize = 1200
+
+// MinUnknownVersionPacketSize is the minimum size a packet with an unknown version
+// needs to have in order to trigger a Version Negotiation packet.
+const MinUnknownVersionPacketSize = MinInitialPacketSize
+
+// MinStatelessResetSize is the minimum size of a stateless reset packet that we send
+const MinStatelessResetSize = 1 /* first byte */ + 20 /* max. conn ID length */ + 4 /* max. packet number length */ + 1 /* min. payload length */ + 16 /* token */
+
+// MinConnectionIDLenInitial is the minimum length of the destination connection ID on an Initial packet.
+const MinConnectionIDLenInitial = 8
+
+// DefaultAckDelayExponent is the default ack delay exponent
+const DefaultAckDelayExponent = 3
+
+// DefaultActiveConnectionIDLimit is the default active connection ID limit
+const DefaultActiveConnectionIDLimit = 2
+
+// MaxAckDelayExponent is the maximum ack delay exponent
+const MaxAckDelayExponent = 20
+
+// DefaultMaxAckDelay is the default max_ack_delay
+const DefaultMaxAckDelay = 25 * time.Millisecond
+
+// MaxMaxAckDelay is the maximum max_ack_delay
+const MaxMaxAckDelay = (1<<14 - 1) * time.Millisecond
+
+// MaxConnIDLen is the maximum length of the connection ID
+const MaxConnIDLen = 20
+
+// InvalidPacketLimitAES is the maximum number of packets that we can fail to decrypt when using
+// AEAD_AES_128_GCM or AEAD_AES_256_GCM.
+const InvalidPacketLimitAES = 1 << 52
+
+// InvalidPacketLimitChaCha is the maximum number of packets that we can fail to decrypt when using AEAD_CHACHA20_POLY1305.
+const InvalidPacketLimitChaCha = 1 << 36 diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/stream.go b/vendor/github.com/quic-go/quic-go/internal/protocol/stream.go new file mode 100644 index 00000000..ad7de864 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/stream.go @@ -0,0 +1,76 @@ +package protocol
+
+// StreamType encodes if this is a unidirectional or bidirectional stream
+type StreamType uint8
+
+const (
+ // StreamTypeUni is a unidirectional stream
+ StreamTypeUni StreamType = iota
+ // StreamTypeBidi is a bidirectional stream
+ StreamTypeBidi
+)
+
+// InvalidStreamID is a stream ID that is invalid.
+// The first valid stream ID in QUIC is 0.
+const InvalidStreamID StreamID = -1
+
+// StreamNum is the stream number
+type StreamNum int64
+
+const (
+ // InvalidStreamNum is an invalid stream number.
+ InvalidStreamNum = -1
+ // MaxStreamCount is the maximum stream count value that can be sent in MAX_STREAMS frames
+ // and as the stream count in the transport parameters
+ MaxStreamCount StreamNum = 1 << 60
+)
+
+// StreamID calculates the stream ID.
+func (s StreamNum) StreamID(stype StreamType, pers Perspective) StreamID { + if s == 0 { + return InvalidStreamID + } + var first StreamID + switch stype { + case StreamTypeBidi: + switch pers { + case PerspectiveClient: + first = 0 + case PerspectiveServer: + first = 1 + } + case StreamTypeUni: + switch pers { + case PerspectiveClient: + first = 2 + case PerspectiveServer: + first = 3 + } + } + return first + 4*StreamID(s-1) +} + +// A StreamID in QUIC +type StreamID int64 + +// InitiatedBy says if the stream was initiated by the client or by the server +func (s StreamID) InitiatedBy() Perspective { + if s%2 == 0 { + return PerspectiveClient + } + return PerspectiveServer +} + +// Type says if this is a unidirectional or bidirectional stream +func (s StreamID) Type() StreamType { + if s%4 >= 2 { + return StreamTypeUni + } + return StreamTypeBidi +} + +// StreamNum returns how many streams in total are below this +// Example: for stream 9 it returns 3 (i.e. streams 1, 5 and 9) +func (s StreamID) StreamNum() StreamNum { + return StreamNum(s/4) + 1 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/version.go b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go new file mode 100644 index 00000000..025ade9b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go @@ -0,0 +1,114 @@ +package protocol + +import ( + "encoding/binary" + "fmt" + "math" + "sync" + "time" + + "golang.org/x/exp/rand" +) + +// Version is a version number as int +type Version uint32 + +// gQUIC version range as defined in the wiki: https://github.com/quicwg/base-drafts/wiki/QUIC-Versions +const ( + gquicVersion0 = 0x51303030 + maxGquicVersion = 0x51303439 +) + +// The version numbers, making grepping easier +const ( + VersionUnknown Version = math.MaxUint32 + versionDraft29 Version = 0xff00001d // draft-29 used to be a widely deployed version + Version1 Version = 0x1 + Version2 Version = 0x6b3343cf +) + +// SupportedVersions lists the versions that the server supports +// must be in sorted descending order +var SupportedVersions = []Version{Version1, Version2} + +// IsValidVersion says if the version is known to quic-go +func IsValidVersion(v Version) bool { + return v == Version1 || IsSupportedVersion(SupportedVersions, v) +} + +func (vn Version) String() string { + //nolint:exhaustive + switch vn { + case VersionUnknown: + return "unknown" + case versionDraft29: + return "draft-29" + case Version1: + return "v1" + case Version2: + return "v2" + default: + if vn.isGQUIC() { + return fmt.Sprintf("gQUIC %d", vn.toGQUICVersion()) + } + return fmt.Sprintf("%#x", uint32(vn)) + } +} + +func (vn Version) isGQUIC() bool { + return vn > gquicVersion0 && vn <= maxGquicVersion +} + +func (vn Version) toGQUICVersion() int { + return int(10*(vn-gquicVersion0)/0x100) + int(vn%0x10) +} + +// IsSupportedVersion returns true if the server supports this version +func IsSupportedVersion(supported []Version, v Version) bool { + for _, t := range supported { + if t == v { + return true + } + } + return false +} + +// ChooseSupportedVersion finds the best version in the overlap of ours and theirs +// ours is a slice of versions that we support, sorted by our preference (descending) +// theirs is a slice of versions offered by the peer. The order does not matter. +// The bool returned indicates if a matching version was found. 
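The StreamNum/StreamID mapping above follows RFC 9000, Section 2.1: the two low bits of a stream ID encode initiator and directionality, so client-initiated bidirectional streams get IDs 0, 4, 8, ... and server-initiated unidirectional streams get 3, 7, 11, ... A worked example (internal package, shown as importable for illustration):

package main

import (
	"fmt"

	"github.com/quic-go/quic-go/internal/protocol"
)

func main() {
	// The 3rd bidirectional stream opened by the client: IDs 0, 4, 8, ...
	id := protocol.StreamNum(3).StreamID(protocol.StreamTypeBidi, protocol.PerspectiveClient)
	fmt.Println(id)                                   // 8
	fmt.Println(id.InitiatedBy())                     // client
	fmt.Println(id.Type() == protocol.StreamTypeBidi) // true
	fmt.Println(id.StreamNum())                       // 3

	// The 1st unidirectional stream opened by the server: IDs 3, 7, 11, ...
	fmt.Println(protocol.StreamNum(1).StreamID(protocol.StreamTypeUni, protocol.PerspectiveServer)) // 3
}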
+func ChooseSupportedVersion(ours, theirs []Version) (Version, bool) { + for _, ourVer := range ours { + for _, theirVer := range theirs { + if ourVer == theirVer { + return ourVer, true + } + } + } + return 0, false +} + +var ( + versionNegotiationMx sync.Mutex + versionNegotiationRand = rand.New(rand.NewSource(uint64(time.Now().UnixNano()))) +) + +// generateReservedVersion generates a reserved version (v & 0x0f0f0f0f == 0x0a0a0a0a) +func generateReservedVersion() Version { + var b [4]byte + _, _ = versionNegotiationRand.Read(b[:]) // ignore the error here. Failure to read random data doesn't break anything + return Version((binary.BigEndian.Uint32(b[:]) | 0x0a0a0a0a) & 0xfafafafa) +} + +// GetGreasedVersions adds one reserved version number to a slice of version numbers, at a random position. +// It doesn't modify the supported slice. +func GetGreasedVersions(supported []Version) []Version { + versionNegotiationMx.Lock() + defer versionNegotiationMx.Unlock() + randPos := rand.Intn(len(supported) + 1) + greased := make([]Version, len(supported)+1) + copy(greased, supported[:randPos]) + greased[randPos] = generateReservedVersion() + copy(greased[randPos+1:], supported[randPos:]) + return greased +} diff --git a/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go b/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go new file mode 100644 index 00000000..00361308 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go @@ -0,0 +1,87 @@ +package qerr + +import ( + "crypto/tls" + "fmt" +) + +// TransportErrorCode is a QUIC transport error. +type TransportErrorCode uint64 + +// The error codes defined by QUIC +const ( + NoError TransportErrorCode = 0x0 + InternalError TransportErrorCode = 0x1 + ConnectionRefused TransportErrorCode = 0x2 + FlowControlError TransportErrorCode = 0x3 + StreamLimitError TransportErrorCode = 0x4 + StreamStateError TransportErrorCode = 0x5 + FinalSizeError TransportErrorCode = 0x6 + FrameEncodingError TransportErrorCode = 0x7 + TransportParameterError TransportErrorCode = 0x8 + ConnectionIDLimitError TransportErrorCode = 0x9 + ProtocolViolation TransportErrorCode = 0xa + InvalidToken TransportErrorCode = 0xb + ApplicationErrorErrorCode TransportErrorCode = 0xc + CryptoBufferExceeded TransportErrorCode = 0xd + KeyUpdateError TransportErrorCode = 0xe + AEADLimitReached TransportErrorCode = 0xf + NoViablePathError TransportErrorCode = 0x10 +) + +func (e TransportErrorCode) IsCryptoError() bool { + return e >= 0x100 && e < 0x200 +} + +// Message is a description of the error. +// It only returns a non-empty string for crypto errors. 
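generateReservedVersion above produces version numbers matching the reserved pattern of RFC 9000, Section 15 (every low nibble equals 0xa), which peers must treat as unsupported; GetGreasedVersions splices exactly one such entry into the offered list. A sketch that makes the pattern visible (isReservedVersion is a hypothetical helper, not part of this package):

package main

import (
	"fmt"

	"github.com/quic-go/quic-go/internal/protocol"
)

// isReservedVersion reports whether v has the 0x?a?a?a?a pattern that
// RFC 9000 reserves for exercising version negotiation ("greasing").
func isReservedVersion(v protocol.Version) bool {
	return v&0x0f0f0f0f == 0x0a0a0a0a
}

func main() {
	greased := protocol.GetGreasedVersions(protocol.SupportedVersions)
	for _, v := range greased {
		fmt.Printf("%-10s reserved=%t\n", v, isReservedVersion(v))
	}
}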
+func (e TransportErrorCode) Message() string { + if !e.IsCryptoError() { + return "" + } + return tls.AlertError(e - 0x100).Error() +} + +func (e TransportErrorCode) String() string { + switch e { + case NoError: + return "NO_ERROR" + case InternalError: + return "INTERNAL_ERROR" + case ConnectionRefused: + return "CONNECTION_REFUSED" + case FlowControlError: + return "FLOW_CONTROL_ERROR" + case StreamLimitError: + return "STREAM_LIMIT_ERROR" + case StreamStateError: + return "STREAM_STATE_ERROR" + case FinalSizeError: + return "FINAL_SIZE_ERROR" + case FrameEncodingError: + return "FRAME_ENCODING_ERROR" + case TransportParameterError: + return "TRANSPORT_PARAMETER_ERROR" + case ConnectionIDLimitError: + return "CONNECTION_ID_LIMIT_ERROR" + case ProtocolViolation: + return "PROTOCOL_VIOLATION" + case InvalidToken: + return "INVALID_TOKEN" + case ApplicationErrorErrorCode: + return "APPLICATION_ERROR" + case CryptoBufferExceeded: + return "CRYPTO_BUFFER_EXCEEDED" + case KeyUpdateError: + return "KEY_UPDATE_ERROR" + case AEADLimitReached: + return "AEAD_LIMIT_REACHED" + case NoViablePathError: + return "NO_VIABLE_PATH" + default: + if e.IsCryptoError() { + return fmt.Sprintf("CRYPTO_ERROR %#x", uint16(e)) + } + return fmt.Sprintf("unknown error code: %#x", uint16(e)) + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go new file mode 100644 index 00000000..8f5936df --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go @@ -0,0 +1,139 @@ +package qerr + +import ( + "fmt" + "net" + + "github.com/quic-go/quic-go/internal/protocol" +) + +var ( + ErrHandshakeTimeout = &HandshakeTimeoutError{} + ErrIdleTimeout = &IdleTimeoutError{} +) + +type TransportError struct { + Remote bool + FrameType uint64 + ErrorCode TransportErrorCode + ErrorMessage string + error error // only set for local errors, sometimes +} + +var _ error = &TransportError{} + +// NewLocalCryptoError create a new TransportError instance for a crypto error +func NewLocalCryptoError(tlsAlert uint8, err error) *TransportError { + return &TransportError{ + ErrorCode: 0x100 + TransportErrorCode(tlsAlert), + error: err, + } +} + +func (e *TransportError) Error() string { + str := fmt.Sprintf("%s (%s)", e.ErrorCode.String(), getRole(e.Remote)) + if e.FrameType != 0 { + str += fmt.Sprintf(" (frame type: %#x)", e.FrameType) + } + msg := e.ErrorMessage + if len(msg) == 0 && e.error != nil { + msg = e.error.Error() + } + if len(msg) == 0 { + msg = e.ErrorCode.Message() + } + if len(msg) == 0 { + return str + } + return str + ": " + msg +} + +func (e *TransportError) Is(target error) bool { + return target == net.ErrClosed +} + +func (e *TransportError) Unwrap() error { + return e.error +} + +// An ApplicationErrorCode is an application-defined error code. +type ApplicationErrorCode uint64 + +func (e *ApplicationError) Is(target error) bool { + return target == net.ErrClosed +} + +// A StreamErrorCode is an error code used to cancel streams. 
+type StreamErrorCode uint64 + +type ApplicationError struct { + Remote bool + ErrorCode ApplicationErrorCode + ErrorMessage string +} + +var _ error = &ApplicationError{} + +func (e *ApplicationError) Error() string { + if len(e.ErrorMessage) == 0 { + return fmt.Sprintf("Application error %#x (%s)", e.ErrorCode, getRole(e.Remote)) + } + return fmt.Sprintf("Application error %#x (%s): %s", e.ErrorCode, getRole(e.Remote), e.ErrorMessage) +} + +type IdleTimeoutError struct{} + +var _ error = &IdleTimeoutError{} + +func (e *IdleTimeoutError) Timeout() bool { return true } +func (e *IdleTimeoutError) Temporary() bool { return false } +func (e *IdleTimeoutError) Error() string { return "timeout: no recent network activity" } +func (e *IdleTimeoutError) Is(target error) bool { return target == net.ErrClosed } + +type HandshakeTimeoutError struct{} + +var _ error = &HandshakeTimeoutError{} + +func (e *HandshakeTimeoutError) Timeout() bool { return true } +func (e *HandshakeTimeoutError) Temporary() bool { return false } +func (e *HandshakeTimeoutError) Error() string { return "timeout: handshake did not complete in time" } +func (e *HandshakeTimeoutError) Is(target error) bool { return target == net.ErrClosed } + +// A VersionNegotiationError occurs when the client and the server can't agree on a QUIC version. +type VersionNegotiationError struct { + Ours []protocol.Version + Theirs []protocol.Version +} + +func (e *VersionNegotiationError) Error() string { + return fmt.Sprintf("no compatible QUIC version found (we support %s, server offered %s)", e.Ours, e.Theirs) +} + +func (e *VersionNegotiationError) Is(target error) bool { + return target == net.ErrClosed +} + +// A StatelessResetError occurs when we receive a stateless reset. +type StatelessResetError struct { + Token protocol.StatelessResetToken +} + +var _ net.Error = &StatelessResetError{} + +func (e *StatelessResetError) Error() string { + return fmt.Sprintf("received a stateless reset with token %x", e.Token) +} + +func (e *StatelessResetError) Is(target error) bool { + return target == net.ErrClosed +} + +func (e *StatelessResetError) Timeout() bool { return false } +func (e *StatelessResetError) Temporary() bool { return true } + +func getRole(remote bool) string { + if remote { + return "remote" + } + return "local" +} diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go b/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go new file mode 100644 index 00000000..32a921cd --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go @@ -0,0 +1,52 @@ +package qtls + +import ( + "crypto/tls" + "fmt" + "unsafe" +) + +//go:linkname cipherSuitesTLS13 crypto/tls.cipherSuitesTLS13 +var cipherSuitesTLS13 []unsafe.Pointer + +//go:linkname defaultCipherSuitesTLS13 crypto/tls.defaultCipherSuitesTLS13 +var defaultCipherSuitesTLS13 []uint16 + +//go:linkname defaultCipherSuitesTLS13NoAES crypto/tls.defaultCipherSuitesTLS13NoAES +var defaultCipherSuitesTLS13NoAES []uint16 + +var cipherSuitesModified bool + +// SetCipherSuite modifies the cipherSuiteTLS13 slice of cipher suites inside qtls +// such that it only contains the cipher suite with the chosen id. +// The reset function returned resets them back to the original value. +func SetCipherSuite(id uint16) (reset func()) { + if cipherSuitesModified { + panic("cipher suites modified multiple times without resetting") + } + cipherSuitesModified = true + + origCipherSuitesTLS13 := append([]unsafe.Pointer{}, cipherSuitesTLS13...) 
+ origDefaultCipherSuitesTLS13 := append([]uint16{}, defaultCipherSuitesTLS13...) + origDefaultCipherSuitesTLS13NoAES := append([]uint16{}, defaultCipherSuitesTLS13NoAES...) + // The order is given by the order of the slice elements in cipherSuitesTLS13 in qtls. + switch id { + case tls.TLS_AES_128_GCM_SHA256: + cipherSuitesTLS13 = cipherSuitesTLS13[:1] + case tls.TLS_CHACHA20_POLY1305_SHA256: + cipherSuitesTLS13 = cipherSuitesTLS13[1:2] + case tls.TLS_AES_256_GCM_SHA384: + cipherSuitesTLS13 = cipherSuitesTLS13[2:] + default: + panic(fmt.Sprintf("unexpected cipher suite: %d", id)) + } + defaultCipherSuitesTLS13 = []uint16{id} + defaultCipherSuitesTLS13NoAES = []uint16{id} + + return func() { + cipherSuitesTLS13 = origCipherSuitesTLS13 + defaultCipherSuitesTLS13 = origDefaultCipherSuitesTLS13 + defaultCipherSuitesTLS13NoAES = origDefaultCipherSuitesTLS13NoAES + cipherSuitesModified = false + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go b/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go new file mode 100644 index 00000000..4acac9e2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go @@ -0,0 +1,70 @@ +package qtls + +import ( + "crypto/tls" + "sync" +) + +type clientSessionCache struct { + mx sync.Mutex + getData func(earlyData bool) []byte + setData func(data []byte, earlyData bool) (allowEarlyData bool) + wrapped tls.ClientSessionCache +} + +var _ tls.ClientSessionCache = &clientSessionCache{} + +func (c *clientSessionCache) Put(key string, cs *tls.ClientSessionState) { + c.mx.Lock() + defer c.mx.Unlock() + + if cs == nil { + c.wrapped.Put(key, nil) + return + } + ticket, state, err := cs.ResumptionState() + if err != nil || state == nil { + c.wrapped.Put(key, cs) + return + } + state.Extra = append(state.Extra, addExtraPrefix(c.getData(state.EarlyData))) + newCS, err := tls.NewResumptionState(ticket, state) + if err != nil { + // It's not clear why this would error. Just save the original state. + c.wrapped.Put(key, cs) + return + } + c.wrapped.Put(key, newCS) +} + +func (c *clientSessionCache) Get(key string) (*tls.ClientSessionState, bool) { + c.mx.Lock() + defer c.mx.Unlock() + + cs, ok := c.wrapped.Get(key) + if !ok || cs == nil { + return cs, ok + } + ticket, state, err := cs.ResumptionState() + if err != nil { + // It's not clear why this would error. + // Remove the ticket from the session cache, so we don't run into this error over and over again + c.wrapped.Put(key, nil) + return nil, false + } + // restore QUIC transport parameters and RTT stored in state.Extra + if extra := findExtraData(state.Extra); extra != nil { + earlyData := c.setData(extra, state.EarlyData) + if state.EarlyData { + state.EarlyData = earlyData + } + } + session, err := tls.NewResumptionState(ticket, state) + if err != nil { + // It's not clear why this would error. 
+ // Remove the ticket from the session cache, so we don't run into this error over and over again + c.wrapped.Put(key, nil) + return nil, false + } + return session, true +} diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go b/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go new file mode 100644 index 00000000..6660ac66 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go @@ -0,0 +1,21 @@ +package qtls + +import ( + "net" + "time" +) + +type conn struct { + localAddr, remoteAddr net.Addr +} + +var _ net.Conn = &conn{} + +func (c *conn) Read([]byte) (int, error) { return 0, nil } +func (c *conn) Write([]byte) (int, error) { return 0, nil } +func (c *conn) Close() error { return nil } +func (c *conn) RemoteAddr() net.Addr { return c.remoteAddr } +func (c *conn) LocalAddr() net.Addr { return c.localAddr } +func (c *conn) SetReadDeadline(time.Time) error { return nil } +func (c *conn) SetWriteDeadline(time.Time) error { return nil } +func (c *conn) SetDeadline(time.Time) error { return nil } diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go b/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go new file mode 100644 index 00000000..cdfe82a2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go @@ -0,0 +1,150 @@ +package qtls + +import ( + "bytes" + "crypto/tls" + "fmt" + "net" + + "github.com/quic-go/quic-go/internal/protocol" +) + +func SetupConfigForServer( + conf *tls.Config, + localAddr, remoteAddr net.Addr, + getData func() []byte, + handleSessionTicket func([]byte, bool) bool, +) *tls.Config { + // Workaround for https://github.com/golang/go/issues/60506. + // This initializes the session tickets _before_ cloning the config. + _, _ = conf.DecryptTicket(nil, tls.ConnectionState{}) + + conf = conf.Clone() + conf.MinVersion = tls.VersionTLS13 + + // add callbacks to save transport parameters into the session ticket + origWrapSession := conf.WrapSession + conf.WrapSession = func(cs tls.ConnectionState, state *tls.SessionState) ([]byte, error) { + // Add QUIC session ticket + state.Extra = append(state.Extra, addExtraPrefix(getData())) + + if origWrapSession != nil { + return origWrapSession(cs, state) + } + b, err := conf.EncryptTicket(cs, state) + return b, err + } + origUnwrapSession := conf.UnwrapSession + // UnwrapSession might be called multiple times, as the client can use multiple session tickets. + // However, using 0-RTT is only possible with the first session ticket. + // crypto/tls guarantees that this callback is called in the same order as the session ticket in the ClientHello. + var unwrapCount int + conf.UnwrapSession = func(identity []byte, connState tls.ConnectionState) (*tls.SessionState, error) { + unwrapCount++ + var state *tls.SessionState + var err error + if origUnwrapSession != nil { + state, err = origUnwrapSession(identity, connState) + } else { + state, err = conf.DecryptTicket(identity, connState) + } + if err != nil || state == nil { + return nil, err + } + + extra := findExtraData(state.Extra) + if extra != nil { + state.EarlyData = handleSessionTicket(extra, state.EarlyData && unwrapCount == 1) + } else { + state.EarlyData = false + } + + return state, nil + } + // The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo. + // Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn + // that allows the caller to get the local and the remote address. 
+ if conf.GetConfigForClient != nil {
+ gcfc := conf.GetConfigForClient
+ conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+ info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr}
+ c, err := gcfc(info)
+ if c != nil {
+ // We're returning a tls.Config here, so we need to apply this recursively.
+ c = SetupConfigForServer(c, localAddr, remoteAddr, getData, handleSessionTicket)
+ }
+ return c, err
+ }
+ }
+ if conf.GetCertificate != nil {
+ gc := conf.GetCertificate
+ conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr}
+ return gc(info)
+ }
+ }
+ return conf
+}
+
+func SetupConfigForClient(
+ qconf *tls.QUICConfig,
+ getData func(earlyData bool) []byte,
+ setData func(data []byte, earlyData bool) (allowEarlyData bool),
+) {
+ conf := qconf.TLSConfig
+ if conf.ClientSessionCache != nil {
+ origCache := conf.ClientSessionCache
+ conf.ClientSessionCache = &clientSessionCache{
+ wrapped: origCache,
+ getData: getData,
+ setData: setData,
+ }
+ }
+}
+
+func ToTLSEncryptionLevel(e protocol.EncryptionLevel) tls.QUICEncryptionLevel {
+ switch e {
+ case protocol.EncryptionInitial:
+ return tls.QUICEncryptionLevelInitial
+ case protocol.EncryptionHandshake:
+ return tls.QUICEncryptionLevelHandshake
+ case protocol.Encryption1RTT:
+ return tls.QUICEncryptionLevelApplication
+ case protocol.Encryption0RTT:
+ return tls.QUICEncryptionLevelEarly
+ default:
+ panic(fmt.Sprintf("unexpected encryption level: %s", e))
+ }
+}
+
+func FromTLSEncryptionLevel(e tls.QUICEncryptionLevel) protocol.EncryptionLevel {
+ switch e {
+ case tls.QUICEncryptionLevelInitial:
+ return protocol.EncryptionInitial
+ case tls.QUICEncryptionLevelHandshake:
+ return protocol.EncryptionHandshake
+ case tls.QUICEncryptionLevelApplication:
+ return protocol.Encryption1RTT
+ case tls.QUICEncryptionLevelEarly:
+ return protocol.Encryption0RTT
+ default:
+ panic(fmt.Sprintf("unexpected encryption level: %s", e))
+ }
+}
+
+const extraPrefix = "quic-go1"
+
+func addExtraPrefix(b []byte) []byte {
+ return append([]byte(extraPrefix), b...)
+} + +func findExtraData(extras [][]byte) []byte { + prefix := []byte(extraPrefix) + for _, extra := range extras { + if len(extra) < len(prefix) || !bytes.Equal(prefix, extra[:len(prefix)]) { + continue + } + return extra[len(prefix):] + } + return nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go b/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go new file mode 100644 index 00000000..b5b9d6fc --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go @@ -0,0 +1,26 @@ +package utils + +import ( + "bufio" + "io" +) + +type bufferedWriteCloser struct { + *bufio.Writer + io.Closer +} + +// NewBufferedWriteCloser creates an io.WriteCloser from a bufio.Writer and an io.Closer +func NewBufferedWriteCloser(writer *bufio.Writer, closer io.Closer) io.WriteCloser { + return &bufferedWriteCloser{ + Writer: writer, + Closer: closer, + } +} + +func (h bufferedWriteCloser) Close() error { + if err := h.Writer.Flush(); err != nil { + return err + } + return h.Closer.Close() +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md new file mode 100644 index 00000000..66482f4f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md @@ -0,0 +1,6 @@ +# Usage + +This is the Go standard library implementation of a linked list +(https://golang.org/src/container/list/list.go), with the following modifications: +* it uses Go generics +* it allows passing in a `sync.Pool` (via the `NewWithPool` constructor) to reduce allocations of `Element` structs diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go new file mode 100644 index 00000000..804a3444 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go @@ -0,0 +1,264 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package list implements a doubly linked list. +// +// To iterate over a list (where l is a *List[T]): +// +// for e := l.Front(); e != nil; e = e.Next() { +// // do something with e.Value +// } +package list + +import "sync" + +func NewPool[T any]() *sync.Pool { + return &sync.Pool{New: func() any { return &Element[T]{} }} +} + +// Element is an element of a linked list. +type Element[T any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Element[T] + + // The list to which this element belongs. + list *List[T] + + // The value stored with this element. + Value T +} + +// Next returns the next list element or nil. +func (e *Element[T]) Next() *Element[T] { + if p := e.next; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// Prev returns the previous list element or nil. +func (e *Element[T]) Prev() *Element[T] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +func (e *Element[T]) List() *List[T] { + return e.list +} + +// List represents a doubly linked list. 
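addExtraPrefix/findExtraData above implement a simple tagging scheme: tls.SessionState.Extra is a shared slice that any wrapper around crypto/tls may append to, so quic-go marks its entry with the "quic-go1" prefix and later picks out only that entry. A standalone sketch of the roundtrip (the two helpers are copied here so the example runs outside the package):

package main

import (
	"bytes"
	"fmt"
)

const extraPrefix = "quic-go1"

func addExtraPrefix(b []byte) []byte { return append([]byte(extraPrefix), b...) }

func findExtraData(extras [][]byte) []byte {
	prefix := []byte(extraPrefix)
	for _, extra := range extras {
		if len(extra) < len(prefix) || !bytes.Equal(prefix, extra[:len(prefix)]) {
			continue
		}
		return extra[len(prefix):]
	}
	return nil
}

func main() {
	// Extra may hold entries from other consumers; only the tagged one is ours.
	extras := [][]byte{
		[]byte("someone else's data"),
		addExtraPrefix([]byte{0x01, 0x02}),
	}
	fmt.Printf("%x\n", findExtraData(extras)) // 0102
}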
+// The zero value for List is an empty list ready to use. +type List[T any] struct { + root Element[T] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list length excluding (this) sentinel element + + pool *sync.Pool +} + +// Init initializes or clears list l. +func (l *List[T]) Init() *List[T] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// New returns an initialized list. +func New[T any]() *List[T] { return new(List[T]).Init() } + +// NewWithPool returns an initialized list, using a sync.Pool for list elements. +func NewWithPool[T any](pool *sync.Pool) *List[T] { + l := &List[T]{pool: pool} + return l.Init() +} + +// Len returns the number of elements of list l. +// The complexity is O(1). +func (l *List[T]) Len() int { return l.len } + +// Front returns the first element of list l or nil if the list is empty. +func (l *List[T]) Front() *Element[T] { + if l.len == 0 { + return nil + } + return l.root.next +} + +// Back returns the last element of list l or nil if the list is empty. +func (l *List[T]) Back() *Element[T] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List value. +func (l *List[T]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *List[T]) insert(e, at *Element[T]) *Element[T] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Element{Value: v}, at). +func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] { + var e *Element[T] + if l.pool != nil { + e = l.pool.Get().(*Element[T]) + } else { + e = &Element[T]{} + } + e.Value = v + return l.insert(e, at) +} + +// remove removes e from its list, decrements l.len +func (l *List[T]) remove(e *Element[T]) { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + if l.pool != nil { + l.pool.Put(e) + } + l.len-- +} + +// move moves e to next to at. +func (l *List[T]) move(e, at *Element[T]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// Remove removes e from l if e is an element of list l. +// It returns the element value e.Value. +// The element must not be nil. +func (l *List[T]) Remove(e *Element[T]) T { + v := e.Value + if e.list == l { + // if e.list == l, l must have been initialized when e was inserted + // in l or l == nil (e is a zero Element) and l.remove will crash + l.remove(e) + } + return v +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *List[T]) PushFront(v T) *Element[T] { + l.lazyInit() + return l.insertValue(v, &l.root) +} + +// PushBack inserts a new element e with value v at the back of list l and returns e. +func (l *List[T]) PushBack(v T) *Element[T] { + l.lazyInit() + return l.insertValue(v, l.root.prev) +} + +// InsertBefore inserts a new element e with value v immediately before mark and returns e. +// If mark is not an element of l, the list is not modified. +// The mark must not be nil. 
+func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] { + if mark.list != l { + return nil + } + // see comment in List.Remove about initialization of l + return l.insertValue(v, mark.prev) +} + +// InsertAfter inserts a new element e with value v immediately after mark and returns e. +// If mark is not an element of l, the list is not modified. +// The mark must not be nil. +func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] { + if mark.list != l { + return nil + } + // see comment in List.Remove about initialization of l + return l.insertValue(v, mark) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[T]) MoveToFront(e *Element[T]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} + +// MoveToBack moves element e to the back of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[T]) MoveToBack(e *Element[T]) { + if e.list != l || l.root.prev == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, l.root.prev) +} + +// MoveBefore moves element e to its new position before mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[T]) MoveBefore(e, mark *Element[T]) { + if e.list != l || e == mark || mark.list != l { + return + } + l.move(e, mark.prev) +} + +// MoveAfter moves element e to its new position after mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[T]) MoveAfter(e, mark *Element[T]) { + if e.list != l || e == mark || mark.list != l { + return + } + l.move(e, mark) +} + +// PushBackList inserts a copy of another list at the back of list l. +// The lists l and other may be the same. They must not be nil. +func (l *List[T]) PushBackList(other *List[T]) { + l.lazyInit() + for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { + l.insertValue(e.Value, l.root.prev) + } +} + +// PushFrontList inserts a copy of another list at the front of list l. +// The lists l and other may be the same. They must not be nil. +func (l *List[T]) PushFrontList(other *List[T]) { + l.lazyInit() + for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { + l.insertValue(e.Value, &l.root) + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/log.go b/vendor/github.com/quic-go/quic-go/internal/utils/log.go new file mode 100644 index 00000000..89b52c0d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/log.go @@ -0,0 +1,131 @@ +package utils + +import ( + "fmt" + "log" + "os" + "strings" + "time" +) + +// LogLevel of quic-go +type LogLevel uint8 + +const ( + // LogLevelNothing disables + LogLevelNothing LogLevel = iota + // LogLevelError enables err logs + LogLevelError + // LogLevelInfo enables info logs (e.g. packets) + LogLevelInfo + // LogLevelDebug enables debug logs (e.g. packet contents) + LogLevelDebug +) + +const logEnv = "QUIC_GO_LOG_LEVEL" + +// A Logger logs. 
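A small usage sketch of the Logger defined below; the default logger reads the QUIC_GO_LOG_LEVEL environment variable ("debug", "info", "error") at init time, and level, prefix, and timestamp format can also be set programmatically (internal package, shown as importable for illustration):

package main

import "github.com/quic-go/quic-go/internal/utils"

func main() {
	// Equivalent to starting the process with QUIC_GO_LOG_LEVEL=debug.
	logger := utils.DefaultLogger.WithPrefix("example")
	logger.SetLogLevel(utils.LogLevelDebug)
	logger.SetLogTimeFormat("15:04:05")
	logger.Debugf("handshake with %s completed", "server")
	logger.Errorf("connection closed: %s", "idle timeout")
}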
+type Logger interface { + SetLogLevel(LogLevel) + SetLogTimeFormat(format string) + WithPrefix(prefix string) Logger + Debug() bool + + Errorf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Debugf(format string, args ...interface{}) +} + +// DefaultLogger is used by quic-go for logging. +var DefaultLogger Logger + +type defaultLogger struct { + prefix string + + logLevel LogLevel + timeFormat string +} + +var _ Logger = &defaultLogger{} + +// SetLogLevel sets the log level +func (l *defaultLogger) SetLogLevel(level LogLevel) { + l.logLevel = level +} + +// SetLogTimeFormat sets the format of the timestamp +// an empty string disables the logging of timestamps +func (l *defaultLogger) SetLogTimeFormat(format string) { + log.SetFlags(0) // disable timestamp logging done by the log package + l.timeFormat = format +} + +// Debugf logs something +func (l *defaultLogger) Debugf(format string, args ...interface{}) { + if l.logLevel == LogLevelDebug { + l.logMessage(format, args...) + } +} + +// Infof logs something +func (l *defaultLogger) Infof(format string, args ...interface{}) { + if l.logLevel >= LogLevelInfo { + l.logMessage(format, args...) + } +} + +// Errorf logs something +func (l *defaultLogger) Errorf(format string, args ...interface{}) { + if l.logLevel >= LogLevelError { + l.logMessage(format, args...) + } +} + +func (l *defaultLogger) logMessage(format string, args ...interface{}) { + var pre string + + if len(l.timeFormat) > 0 { + pre = time.Now().Format(l.timeFormat) + " " + } + if len(l.prefix) > 0 { + pre += l.prefix + " " + } + log.Printf(pre+format, args...) +} + +func (l *defaultLogger) WithPrefix(prefix string) Logger { + if len(l.prefix) > 0 { + prefix = l.prefix + " " + prefix + } + return &defaultLogger{ + logLevel: l.logLevel, + timeFormat: l.timeFormat, + prefix: prefix, + } +} + +// Debug returns true if the log level is LogLevelDebug +func (l *defaultLogger) Debug() bool { + return l.logLevel == LogLevelDebug +} + +func init() { + DefaultLogger = &defaultLogger{} + DefaultLogger.SetLogLevel(readLoggingEnv()) +} + +func readLoggingEnv() LogLevel { + switch strings.ToLower(os.Getenv(logEnv)) { + case "": + return LogLevelNothing + case "debug": + return LogLevelDebug + case "info": + return LogLevelInfo + case "error": + return LogLevelError + default: + fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/quic-go/quic-go/wiki/Logging") + return LogLevelNothing + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/rand.go b/vendor/github.com/quic-go/quic-go/internal/utils/rand.go new file mode 100644 index 00000000..30069144 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/rand.go @@ -0,0 +1,29 @@ +package utils + +import ( + "crypto/rand" + "encoding/binary" +) + +// Rand is a wrapper around crypto/rand that adds some convenience functions known from math/rand. 
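Editorial aside: the log level is fixed at process start, since init reads QUIC_GO_LOG_LEVEL exactly once, so the variable must be set before the program launches rather than at runtime. A sketch of how the resulting logger is used, assuming code inside quic-go itself (the utils package is internal and cannot be imported from outside the module):

```go
// Hypothetical sketch; run the process with QUIC_GO_LOG_LEVEL=debug.
logger := DefaultLogger.WithPrefix("client")
logger.SetLogTimeFormat(time.RFC3339) // an empty string disables timestamps
if logger.Debug() {                   // gate expensive formatting on the level
	logger.Debugf("handshake done after %s", 42*time.Millisecond)
}
```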
+type Rand struct { + buf [4]byte +} + +func (r *Rand) Int31() int32 { + rand.Read(r.buf[:]) + return int32(binary.BigEndian.Uint32(r.buf[:]) & ^uint32(1<<31)) +} + +// copied from the standard library math/rand implementation of Int63n +func (r *Rand) Int31n(n int32) int32 { + if n&(n-1) == 0 { // n is power of two, can mask + return r.Int31() & (n - 1) + } + max := int32((1 << 31) - 1 - (1<<31)%uint32(n)) + v := r.Int31() + for v > max { + v = r.Int31() + } + return v % n +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/ringbuffer/ringbuffer.go b/vendor/github.com/quic-go/quic-go/internal/utils/ringbuffer/ringbuffer.go new file mode 100644 index 00000000..f9b2c797 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/ringbuffer/ringbuffer.go @@ -0,0 +1,96 @@ +package ringbuffer + +// A RingBuffer is a ring buffer. +// It acts as a heap that doesn't cause any allocations. +type RingBuffer[T any] struct { + ring []T + headPos, tailPos int + full bool +} + +// Init preallocates a buffer with a certain size. +func (r *RingBuffer[T]) Init(size int) { + r.ring = make([]T, size) +} + +// Len returns the number of elements in the ring buffer. +func (r *RingBuffer[T]) Len() int { + if r.full { + return len(r.ring) + } + if r.tailPos >= r.headPos { + return r.tailPos - r.headPos + } + return r.tailPos - r.headPos + len(r.ring) +} + +// Empty says if the ring buffer is empty. +func (r *RingBuffer[T]) Empty() bool { + return !r.full && r.headPos == r.tailPos +} + +// PushBack adds a new element. +// If the ring buffer is full, its capacity is increased first. +func (r *RingBuffer[T]) PushBack(t T) { + if r.full || len(r.ring) == 0 { + r.grow() + } + r.ring[r.tailPos] = t + r.tailPos++ + if r.tailPos == len(r.ring) { + r.tailPos = 0 + } + if r.tailPos == r.headPos { + r.full = true + } +} + +// PopFront returns the next element. +// It must not be called when the buffer is empty, that means that +// callers might need to check if there are elements in the buffer first. +func (r *RingBuffer[T]) PopFront() T { + if r.Empty() { + panic("github.com/quic-go/quic-go/internal/utils/ringbuffer: pop from an empty queue") + } + r.full = false + t := r.ring[r.headPos] + r.ring[r.headPos] = *new(T) + r.headPos++ + if r.headPos == len(r.ring) { + r.headPos = 0 + } + return t +} + +// PeekFront returns the next element. +// It must not be called when the buffer is empty, that means that +// callers might need to check if there are elements in the buffer first. +func (r *RingBuffer[T]) PeekFront() T { + if r.Empty() { + panic("github.com/quic-go/quic-go/internal/utils/ringbuffer: peek from an empty queue") + } + return r.ring[r.headPos] +} + +// Grow the maximum size of the queue. +// This method assume the queue is full. +func (r *RingBuffer[T]) grow() { + oldRing := r.ring + newSize := len(oldRing) * 2 + if newSize == 0 { + newSize = 1 + } + r.ring = make([]T, newSize) + headLen := copy(r.ring, oldRing[r.headPos:]) + copy(r.ring[headLen:], oldRing[:r.headPos]) + r.headPos, r.tailPos, r.full = 0, len(oldRing), false +} + +// Clear removes all elements. 
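Editorial aside: the ring buffer above doubles its capacity on overflow and zeroes slots in PopFront so popped values do not pin memory. A short usage sketch, assuming code in the same package (it is internal to quic-go):

```go
// Hypothetical sketch; the zero value is usable, Init just preallocates.
var q RingBuffer[int]
q.Init(2) // room for two elements before the first grow

q.PushBack(1)
q.PushBack(2)
q.PushBack(3) // buffer is full, so grow doubles capacity to 4 first

for !q.Empty() {
	fmt.Println(q.PopFront()) // 1, 2, 3 — FIFO order
}
```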
+func (r *RingBuffer[T]) Clear() { + var zeroValue T + for i := range r.ring { + r.ring[i] = zeroValue + } + r.headPos, r.tailPos, r.full = 0, 0, false +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go new file mode 100644 index 00000000..24e1f64c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go @@ -0,0 +1,131 @@ +package utils + +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) + +const ( + rttAlpha = 0.125 + oneMinusAlpha = 1 - rttAlpha + rttBeta = 0.25 + oneMinusBeta = 1 - rttBeta + // The default RTT used before an RTT sample is taken. + defaultInitialRTT = 100 * time.Millisecond +) + +// RTTStats provides round-trip statistics +type RTTStats struct { + hasMeasurement bool + + minRTT time.Duration + latestRTT time.Duration + smoothedRTT time.Duration + meanDeviation time.Duration + + maxAckDelay time.Duration +} + +// NewRTTStats makes a properly initialized RTTStats object +func NewRTTStats() *RTTStats { + return &RTTStats{} +} + +// MinRTT Returns the minRTT for the entire connection. +// May return Zero if no valid updates have occurred. +func (r *RTTStats) MinRTT() time.Duration { return r.minRTT } + +// LatestRTT returns the most recent rtt measurement. +// May return Zero if no valid updates have occurred. +func (r *RTTStats) LatestRTT() time.Duration { return r.latestRTT } + +// SmoothedRTT returns the smoothed RTT for the connection. +// May return Zero if no valid updates have occurred. +func (r *RTTStats) SmoothedRTT() time.Duration { return r.smoothedRTT } + +// MeanDeviation gets the mean deviation +func (r *RTTStats) MeanDeviation() time.Duration { return r.meanDeviation } + +// MaxAckDelay gets the max_ack_delay advertised by the peer +func (r *RTTStats) MaxAckDelay() time.Duration { return r.maxAckDelay } + +// PTO gets the probe timeout duration. +func (r *RTTStats) PTO(includeMaxAckDelay bool) time.Duration { + if r.SmoothedRTT() == 0 { + return 2 * defaultInitialRTT + } + pto := r.SmoothedRTT() + max(4*r.MeanDeviation(), protocol.TimerGranularity) + if includeMaxAckDelay { + pto += r.MaxAckDelay() + } + return pto +} + +// UpdateRTT updates the RTT based on a new sample. +func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) { + if sendDelta <= 0 { + return + } + + // Update r.minRTT first. r.minRTT does not use an rttSample corrected for + // ackDelay but the raw observed sendDelta, since poor clock granularity at + // the client may cause a high ackDelay to result in underestimation of the + // r.minRTT. + if r.minRTT == 0 || r.minRTT > sendDelta { + r.minRTT = sendDelta + } + + // Correct for ackDelay if information received from the peer results in a + // an RTT sample at least as large as minRTT. Otherwise, only use the + // sendDelta. + sample := sendDelta + if sample-r.minRTT >= ackDelay { + sample -= ackDelay + } + r.latestRTT = sample + // First time call. 
+ if !r.hasMeasurement { + r.hasMeasurement = true + r.smoothedRTT = sample + r.meanDeviation = sample / 2 + } else { + r.meanDeviation = time.Duration(oneMinusBeta*float32(r.meanDeviation/time.Microsecond)+rttBeta*float32((r.smoothedRTT-sample).Abs()/time.Microsecond)) * time.Microsecond + r.smoothedRTT = time.Duration((float32(r.smoothedRTT/time.Microsecond)*oneMinusAlpha)+(float32(sample/time.Microsecond)*rttAlpha)) * time.Microsecond + } +} + +// SetMaxAckDelay sets the max_ack_delay +func (r *RTTStats) SetMaxAckDelay(mad time.Duration) { + r.maxAckDelay = mad +} + +// SetInitialRTT sets the initial RTT. +// It is used during the 0-RTT handshake when restoring the RTT stats from the session state. +func (r *RTTStats) SetInitialRTT(t time.Duration) { + // On the server side, by the time we get to process the session ticket, + // we might already have obtained an RTT measurement. + // This can happen if we received the ClientHello in multiple pieces, and one of those pieces was lost. + // Discard the restored value. A fresh measurement is always better. + if r.hasMeasurement { + return + } + r.smoothedRTT = t + r.latestRTT = t +} + +// OnConnectionMigration is called when connection migrates and rtt measurement needs to be reset. +func (r *RTTStats) OnConnectionMigration() { + r.latestRTT = 0 + r.minRTT = 0 + r.smoothedRTT = 0 + r.meanDeviation = 0 +} + +// ExpireSmoothedMetrics causes the smoothed_rtt to be increased to the latest_rtt if the latest_rtt +// is larger. The mean deviation is increased to the most recent deviation if +// it's larger. +func (r *RTTStats) ExpireSmoothedMetrics() { + r.meanDeviation = max(r.meanDeviation, (r.smoothedRTT - r.latestRTT).Abs()) + r.smoothedRTT = max(r.smoothedRTT, r.latestRTT) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/timer.go b/vendor/github.com/quic-go/quic-go/internal/utils/timer.go new file mode 100644 index 00000000..361106c8 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/timer.go @@ -0,0 +1,57 @@ +package utils + +import ( + "math" + "time" +) + +// A Timer wrapper that behaves correctly when resetting +type Timer struct { + t *time.Timer + read bool + deadline time.Time +} + +// NewTimer creates a new timer that is not set +func NewTimer() *Timer { + return &Timer{t: time.NewTimer(time.Duration(math.MaxInt64))} +} + +// Chan returns the channel of the wrapped timer +func (t *Timer) Chan() <-chan time.Time { + return t.t.C +} + +// Reset the timer, no matter whether the value was read or not +func (t *Timer) Reset(deadline time.Time) { + if deadline.Equal(t.deadline) && !t.read { + // No need to reset the timer + return + } + + // We need to drain the timer if the value from its channel was not read yet. 
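+	// If the timer already fired and nobody received from t.C, Stop returns
+	// false and the stale expiry is still buffered in the channel; draining it
+	// here makes the Reset below behave like a brand-new timer.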
+	// See https://groups.google.com/forum/#!topic/golang-dev/c9UUfASVPoU
+	if !t.t.Stop() && !t.read {
+		<-t.t.C
+	}
+	if !deadline.IsZero() {
+		t.t.Reset(time.Until(deadline))
+	}
+
+	t.read = false
+	t.deadline = deadline
+}
+
+// SetRead should be called after the value from the chan was read
+func (t *Timer) SetRead() {
+	t.read = true
+}
+
+func (t *Timer) Deadline() time.Time {
+	return t.deadline
+}
+
+// Stop stops the timer
+func (t *Timer) Stop() {
+	t.t.Stop()
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
new file mode 100644
index 00000000..8befef4f
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
@@ -0,0 +1,275 @@
+package wire
+
+import (
+	"errors"
+	"math"
+	"sort"
+	"time"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/quicvarint"
+)
+
+var errInvalidAckRanges = errors.New("AckFrame: ACK frame contains invalid ACK ranges")
+
+// An AckFrame is an ACK frame
+type AckFrame struct {
+	AckRanges []AckRange // has to be ordered. The highest ACK range goes first, the lowest ACK range goes last
+	DelayTime time.Duration
+
+	ECT0, ECT1, ECNCE uint64
+}
+
+// parseAckFrame reads an ACK frame
+func parseAckFrame(frame *AckFrame, b []byte, typ uint64, ackDelayExponent uint8, _ protocol.Version) (int, error) {
+	startLen := len(b)
+	ecn := typ == ackECNFrameType
+
+	la, l, err := quicvarint.Parse(b)
+	if err != nil {
+		return 0, replaceUnexpectedEOF(err)
+	}
+	b = b[l:]
+	largestAcked := protocol.PacketNumber(la)
+	delay, l, err := quicvarint.Parse(b)
+	if err != nil {
+		return 0, replaceUnexpectedEOF(err)
+	}
+	b = b[l:]
+
+	delayTime := time.Duration(delay*1<<ackDelayExponent) * time.Microsecond
+	if delayTime < 0 {
+		// If the delay time overflows, set it to the maximum encodable value.
+		delayTime = time.Duration(math.MaxInt64)
+	}
+	frame.DelayTime = delayTime
+
+	numBlocks, l, err := quicvarint.Parse(b)
+	if err != nil {
+		return 0, replaceUnexpectedEOF(err)
+	}
+	b = b[l:]
+
+	// read the first ACK range
+	ab, l, err := quicvarint.Parse(b)
+	if err != nil {
+		return 0, replaceUnexpectedEOF(err)
+	}
+	b = b[l:]
+	ackBlock := protocol.PacketNumber(ab)
+	if ackBlock > largestAcked {
+		return 0, errors.New("invalid first ACK range")
+	}
+	smallest := largestAcked - ackBlock
+	frame.AckRanges = append(frame.AckRanges, AckRange{Smallest: smallest, Largest: largestAcked})
+
+	// read all the other ACK ranges
+	for i := uint64(0); i < numBlocks; i++ {
+		g, l, err := quicvarint.Parse(b)
+		if err != nil {
+			return 0, replaceUnexpectedEOF(err)
+		}
+		b = b[l:]
+		gap := protocol.PacketNumber(g)
+		if smallest < gap+2 {
+			return 0, errInvalidAckRanges
+		}
+		largest := smallest - gap - 2
+
+		ab, l, err := quicvarint.Parse(b)
+		if err != nil {
+			return 0, replaceUnexpectedEOF(err)
+		}
+		b = b[l:]
+		ackBlock := protocol.PacketNumber(ab)
+
+		if ackBlock > largest {
+			return 0, errInvalidAckRanges
+		}
+		smallest = largest - ackBlock
+		frame.AckRanges = append(frame.AckRanges, AckRange{Smallest: smallest, Largest: largest})
+	}
+
+	if !frame.validateAckRanges() {
+		return 0, errInvalidAckRanges
+	}
+
+	if ecn {
+		ect0, l, err := quicvarint.Parse(b)
+		if err != nil {
+			return 0, replaceUnexpectedEOF(err)
+		}
+		b = b[l:]
+		frame.ECT0 = ect0
+		ect1, l, err := quicvarint.Parse(b)
+		if err != nil {
+			return 0, replaceUnexpectedEOF(err)
+		}
+		b = b[l:]
+		frame.ECT1 = ect1
+		ecnce, l, err := quicvarint.Parse(b)
+		if err != nil {
+			return 0, replaceUnexpectedEOF(err)
+		}
+		b = b[l:]
+		frame.ECNCE = ecnce
+	}
+
+	return startLen - len(b), nil
+}
+
+// Append appends an ACK frame.
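Editorial aside: each additional ACK range on the wire is a (gap, length) pair relative to the previous range's smallest packet number, with the gap offset by 2 because adjacent ranges are necessarily separated by at least one missing packet. A stdlib-only sketch of the decode arithmetic used in the loop above:

```go
package main

import "fmt"

func main() {
	smallest := uint64(10) // smallest packet number of the previous (higher) range
	gap, ackBlock := uint64(1), uint64(2)

	largest := smallest - gap - 2     // 7: packets 8 and 9 are the missing gap
	newSmallest := largest - ackBlock // 5: this range acknowledges 5, 6, 7
	fmt.Println(largest, newSmallest)
}
```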
+func (f *AckFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + hasECN := f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 + if hasECN { + b = append(b, ackECNFrameType) + } else { + b = append(b, ackFrameType) + } + b = quicvarint.Append(b, uint64(f.LargestAcked())) + b = quicvarint.Append(b, encodeAckDelay(f.DelayTime)) + + numRanges := f.numEncodableAckRanges() + b = quicvarint.Append(b, uint64(numRanges-1)) + + // write the first range + _, firstRange := f.encodeAckRange(0) + b = quicvarint.Append(b, firstRange) + + // write all the other range + for i := 1; i < numRanges; i++ { + gap, len := f.encodeAckRange(i) + b = quicvarint.Append(b, gap) + b = quicvarint.Append(b, len) + } + + if hasECN { + b = quicvarint.Append(b, f.ECT0) + b = quicvarint.Append(b, f.ECT1) + b = quicvarint.Append(b, f.ECNCE) + } + return b, nil +} + +// Length of a written frame +func (f *AckFrame) Length(_ protocol.Version) protocol.ByteCount { + largestAcked := f.AckRanges[0].Largest + numRanges := f.numEncodableAckRanges() + + length := 1 + quicvarint.Len(uint64(largestAcked)) + quicvarint.Len(encodeAckDelay(f.DelayTime)) + + length += quicvarint.Len(uint64(numRanges - 1)) + lowestInFirstRange := f.AckRanges[0].Smallest + length += quicvarint.Len(uint64(largestAcked - lowestInFirstRange)) + + for i := 1; i < numRanges; i++ { + gap, len := f.encodeAckRange(i) + length += quicvarint.Len(gap) + length += quicvarint.Len(len) + } + if f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 { + length += quicvarint.Len(f.ECT0) + length += quicvarint.Len(f.ECT1) + length += quicvarint.Len(f.ECNCE) + } + return protocol.ByteCount(length) +} + +// gets the number of ACK ranges that can be encoded +// such that the resulting frame is smaller than the maximum ACK frame size +func (f *AckFrame) numEncodableAckRanges() int { + length := 1 + quicvarint.Len(uint64(f.LargestAcked())) + quicvarint.Len(encodeAckDelay(f.DelayTime)) + length += 2 // assume that the number of ranges will consume 2 bytes + for i := 1; i < len(f.AckRanges); i++ { + gap, len := f.encodeAckRange(i) + rangeLen := quicvarint.Len(gap) + quicvarint.Len(len) + if protocol.ByteCount(length+rangeLen) > protocol.MaxAckFrameSize { + // Writing range i would exceed the MaxAckFrameSize. + // So encode one range less than that. 
+ return i - 1 + } + length += rangeLen + } + return len(f.AckRanges) +} + +func (f *AckFrame) encodeAckRange(i int) (uint64 /* gap */, uint64 /* length */) { + if i == 0 { + return 0, uint64(f.AckRanges[0].Largest - f.AckRanges[0].Smallest) + } + return uint64(f.AckRanges[i-1].Smallest - f.AckRanges[i].Largest - 2), + uint64(f.AckRanges[i].Largest - f.AckRanges[i].Smallest) +} + +// HasMissingRanges returns if this frame reports any missing packets +func (f *AckFrame) HasMissingRanges() bool { + return len(f.AckRanges) > 1 +} + +func (f *AckFrame) validateAckRanges() bool { + if len(f.AckRanges) == 0 { + return false + } + + // check the validity of every single ACK range + for _, ackRange := range f.AckRanges { + if ackRange.Smallest > ackRange.Largest { + return false + } + } + + // check the consistency for ACK with multiple NACK ranges + for i, ackRange := range f.AckRanges { + if i == 0 { + continue + } + lastAckRange := f.AckRanges[i-1] + if lastAckRange.Smallest <= ackRange.Smallest { + return false + } + if lastAckRange.Smallest <= ackRange.Largest+1 { + return false + } + } + + return true +} + +// LargestAcked is the largest acked packet number +func (f *AckFrame) LargestAcked() protocol.PacketNumber { + return f.AckRanges[0].Largest +} + +// LowestAcked is the lowest acked packet number +func (f *AckFrame) LowestAcked() protocol.PacketNumber { + return f.AckRanges[len(f.AckRanges)-1].Smallest +} + +// AcksPacket determines if this ACK frame acks a certain packet number +func (f *AckFrame) AcksPacket(p protocol.PacketNumber) bool { + if p < f.LowestAcked() || p > f.LargestAcked() { + return false + } + + i := sort.Search(len(f.AckRanges), func(i int) bool { + return p >= f.AckRanges[i].Smallest + }) + // i will always be < len(f.AckRanges), since we checked above that p is not bigger than the largest acked + return p <= f.AckRanges[i].Largest +} + +func (f *AckFrame) Reset() { + f.DelayTime = 0 + f.ECT0 = 0 + f.ECT1 = 0 + f.ECNCE = 0 + for _, r := range f.AckRanges { + r.Largest = 0 + r.Smallest = 0 + } + f.AckRanges = f.AckRanges[:0] +} + +func encodeAckDelay(delay time.Duration) uint64 { + return uint64(delay.Nanoseconds() / (1000 * (1 << protocol.AckDelayExponent))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go new file mode 100644 index 00000000..03a1235e --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go @@ -0,0 +1,14 @@ +package wire + +import "github.com/quic-go/quic-go/internal/protocol" + +// AckRange is an ACK range +type AckRange struct { + Smallest protocol.PacketNumber + Largest protocol.PacketNumber +} + +// Len returns the number of packets contained in this ACK range +func (r AckRange) Len() protocol.PacketNumber { + return r.Largest - r.Smallest + 1 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go new file mode 100644 index 00000000..be11a1b2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go @@ -0,0 +1,75 @@ +package wire + +import ( + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A ConnectionCloseFrame is a CONNECTION_CLOSE frame +type ConnectionCloseFrame struct { + IsApplicationError bool + ErrorCode uint64 + FrameType uint64 + ReasonPhrase string +} + +func parseConnectionCloseFrame(b []byte, typ uint64, _ 
protocol.Version) (*ConnectionCloseFrame, int, error) { + startLen := len(b) + f := &ConnectionCloseFrame{IsApplicationError: typ == applicationCloseFrameType} + ec, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + f.ErrorCode = ec + // read the Frame Type, if this is not an application error + if !f.IsApplicationError { + ft, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + f.FrameType = ft + } + var reasonPhraseLen uint64 + reasonPhraseLen, l, err = quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + if int(reasonPhraseLen) > len(b) { + return nil, 0, io.EOF + } + + reasonPhrase := make([]byte, reasonPhraseLen) + copy(reasonPhrase, b) + f.ReasonPhrase = string(reasonPhrase) + return f, startLen - len(b) + int(reasonPhraseLen), nil +} + +// Length of a written frame +func (f *ConnectionCloseFrame) Length(protocol.Version) protocol.ByteCount { + length := 1 + protocol.ByteCount(quicvarint.Len(f.ErrorCode)+quicvarint.Len(uint64(len(f.ReasonPhrase)))) + protocol.ByteCount(len(f.ReasonPhrase)) + if !f.IsApplicationError { + length += protocol.ByteCount(quicvarint.Len(f.FrameType)) // for the frame type + } + return length +} + +func (f *ConnectionCloseFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + if f.IsApplicationError { + b = append(b, applicationCloseFrameType) + } else { + b = append(b, connectionCloseFrameType) + } + + b = quicvarint.Append(b, f.ErrorCode) + if !f.IsApplicationError { + b = quicvarint.Append(b, f.FrameType) + } + b = quicvarint.Append(b, uint64(len(f.ReasonPhrase))) + b = append(b, []byte(f.ReasonPhrase)...) + return b, nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go new file mode 100644 index 00000000..0aa7fe7b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go @@ -0,0 +1,97 @@ +package wire + +import ( + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A CryptoFrame is a CRYPTO frame +type CryptoFrame struct { + Offset protocol.ByteCount + Data []byte +} + +func parseCryptoFrame(b []byte, _ protocol.Version) (*CryptoFrame, int, error) { + startLen := len(b) + frame := &CryptoFrame{} + offset, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + frame.Offset = protocol.ByteCount(offset) + dataLen, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + if dataLen > uint64(len(b)) { + return nil, 0, io.EOF + } + if dataLen != 0 { + frame.Data = make([]byte, dataLen) + copy(frame.Data, b) + } + return frame, startLen - len(b) + int(dataLen), nil +} + +func (f *CryptoFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, cryptoFrameType) + b = quicvarint.Append(b, uint64(f.Offset)) + b = quicvarint.Append(b, uint64(len(f.Data))) + b = append(b, f.Data...) 
+ return b, nil +} + +// Length of a written frame +func (f *CryptoFrame) Length(_ protocol.Version) protocol.ByteCount { + return protocol.ByteCount(1 + quicvarint.Len(uint64(f.Offset)) + quicvarint.Len(uint64(len(f.Data))) + len(f.Data)) +} + +// MaxDataLen returns the maximum data length +func (f *CryptoFrame) MaxDataLen(maxSize protocol.ByteCount) protocol.ByteCount { + // pretend that the data size will be 1 bytes + // if it turns out that varint encoding the length will consume 2 bytes, we need to adjust the data length afterwards + headerLen := protocol.ByteCount(1 + quicvarint.Len(uint64(f.Offset)) + 1) + if headerLen > maxSize { + return 0 + } + maxDataLen := maxSize - headerLen + if quicvarint.Len(uint64(maxDataLen)) != 1 { + maxDataLen-- + } + return maxDataLen +} + +// MaybeSplitOffFrame splits a frame such that it is not bigger than n bytes. +// It returns if the frame was actually split. +// The frame might not be split if: +// * the size is large enough to fit the whole frame +// * the size is too small to fit even a 1-byte frame. In that case, the frame returned is nil. +func (f *CryptoFrame) MaybeSplitOffFrame(maxSize protocol.ByteCount, version protocol.Version) (*CryptoFrame, bool /* was splitting required */) { + if f.Length(version) <= maxSize { + return nil, false + } + + n := f.MaxDataLen(maxSize) + if n == 0 { + return nil, true + } + + newLen := protocol.ByteCount(len(f.Data)) - n + + new := &CryptoFrame{} + new.Offset = f.Offset + new.Data = make([]byte, newLen) + + // swap the data slices + new.Data, f.Data = f.Data, new.Data + + copy(f.Data, new.Data[n:]) + new.Data = new.Data[:n] + f.Offset += n + + return new, true +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go new file mode 100644 index 00000000..c97d4c62 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go @@ -0,0 +1,29 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A DataBlockedFrame is a DATA_BLOCKED frame +type DataBlockedFrame struct { + MaximumData protocol.ByteCount +} + +func parseDataBlockedFrame(b []byte, _ protocol.Version) (*DataBlockedFrame, int, error) { + offset, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + return &DataBlockedFrame{MaximumData: protocol.ByteCount(offset)}, l, nil +} + +func (f *DataBlockedFrame) Append(b []byte, version protocol.Version) ([]byte, error) { + b = append(b, dataBlockedFrameType) + return quicvarint.Append(b, uint64(f.MaximumData)), nil +} + +// Length of a written frame +func (f *DataBlockedFrame) Length(version protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.MaximumData))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go new file mode 100644 index 00000000..071fda9a --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go @@ -0,0 +1,85 @@ +package wire + +import ( + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// MaxDatagramSize is the maximum size of a DATAGRAM frame (RFC 9221). +// By setting it to a large value, we allow all datagrams that fit into a QUIC packet. +// The value is chosen such that it can still be encoded as a 2 byte varint. 
+// This is a var and not a const so it can be set in tests. +var MaxDatagramSize protocol.ByteCount = 16383 + +// A DatagramFrame is a DATAGRAM frame +type DatagramFrame struct { + DataLenPresent bool + Data []byte +} + +func parseDatagramFrame(b []byte, typ uint64, _ protocol.Version) (*DatagramFrame, int, error) { + startLen := len(b) + f := &DatagramFrame{} + f.DataLenPresent = typ&0x1 > 0 + + var length uint64 + if f.DataLenPresent { + var err error + var l int + length, l, err = quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + if length > uint64(len(b)) { + return nil, 0, io.EOF + } + } else { + length = uint64(len(b)) + } + f.Data = make([]byte, length) + copy(f.Data, b) + return f, startLen - len(b) + int(length), nil +} + +func (f *DatagramFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + typ := uint8(0x30) + if f.DataLenPresent { + typ ^= 0b1 + } + b = append(b, typ) + if f.DataLenPresent { + b = quicvarint.Append(b, uint64(len(f.Data))) + } + b = append(b, f.Data...) + return b, nil +} + +// MaxDataLen returns the maximum data length +func (f *DatagramFrame) MaxDataLen(maxSize protocol.ByteCount, version protocol.Version) protocol.ByteCount { + headerLen := protocol.ByteCount(1) + if f.DataLenPresent { + // pretend that the data size will be 1 bytes + // if it turns out that varint encoding the length will consume 2 bytes, we need to adjust the data length afterwards + headerLen++ + } + if headerLen > maxSize { + return 0 + } + maxDataLen := maxSize - headerLen + if f.DataLenPresent && quicvarint.Len(uint64(maxDataLen)) != 1 { + maxDataLen-- + } + return maxDataLen +} + +// Length of a written frame +func (f *DatagramFrame) Length(_ protocol.Version) protocol.ByteCount { + length := 1 + protocol.ByteCount(len(f.Data)) + if f.DataLenPresent { + length += protocol.ByteCount(quicvarint.Len(uint64(len(f.Data)))) + } + return length +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go new file mode 100644 index 00000000..1c6ad991 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go @@ -0,0 +1,166 @@ +package wire + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" +) + +// ErrInvalidReservedBits is returned when the reserved bits are incorrect. +// When this error is returned, parsing continues, and an ExtendedHeader is returned. +// This is necessary because we need to decrypt the packet in that case, +// in order to avoid a timing side-channel. +var ErrInvalidReservedBits = errors.New("invalid reserved bits") + +// ExtendedHeader is the header of a QUIC packet. 
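Editorial aside: Append above toggles the low bit of the DATAGRAM frame type to signal whether an explicit length follows (RFC 9221 defines types 0x30 and 0x31; without a length field the datagram runs to the end of the packet). The type-byte logic in isolation:

```go
package main

import "fmt"

func main() {
	typ := uint8(0x30) // DATAGRAM frame type with the LEN bit clear
	if dataLenPresent := true; dataLenPresent {
		typ ^= 0b1 // 0x31: a varint length field follows the type byte
	}
	fmt.Printf("frame type: %#x\n", typ)
}
```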
+type ExtendedHeader struct { + Header + + typeByte byte + + KeyPhase protocol.KeyPhaseBit + + PacketNumberLen protocol.PacketNumberLen + PacketNumber protocol.PacketNumber + + parsedLen protocol.ByteCount +} + +func (h *ExtendedHeader) parse(data []byte) (bool /* reserved bits valid */, error) { + // read the (now unencrypted) first byte + h.typeByte = data[0] + h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1 + if protocol.ByteCount(len(data)) < h.Header.ParsedLen()+protocol.ByteCount(h.PacketNumberLen) { + return false, io.EOF + } + + pn, err := readPacketNumber(data[h.Header.ParsedLen():], h.PacketNumberLen) + if err != nil { + return true, nil + } + h.PacketNumber = pn + reservedBitsValid := h.typeByte&0xc == 0 + + h.parsedLen = h.Header.ParsedLen() + protocol.ByteCount(h.PacketNumberLen) + return reservedBitsValid, err +} + +// Append appends the Header. +func (h *ExtendedHeader) Append(b []byte, v protocol.Version) ([]byte, error) { + if h.DestConnectionID.Len() > protocol.MaxConnIDLen { + return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len()) + } + if h.SrcConnectionID.Len() > protocol.MaxConnIDLen { + return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len()) + } + + var packetType uint8 + if v == protocol.Version2 { + //nolint:exhaustive + switch h.Type { + case protocol.PacketTypeInitial: + packetType = 0b01 + case protocol.PacketType0RTT: + packetType = 0b10 + case protocol.PacketTypeHandshake: + packetType = 0b11 + case protocol.PacketTypeRetry: + packetType = 0b00 + } + } else { + //nolint:exhaustive + switch h.Type { + case protocol.PacketTypeInitial: + packetType = 0b00 + case protocol.PacketType0RTT: + packetType = 0b01 + case protocol.PacketTypeHandshake: + packetType = 0b10 + case protocol.PacketTypeRetry: + packetType = 0b11 + } + } + firstByte := 0xc0 | packetType<<4 + if h.Type != protocol.PacketTypeRetry { + // Retry packets don't have a packet number + firstByte |= uint8(h.PacketNumberLen - 1) + } + + b = append(b, firstByte) + b = append(b, make([]byte, 4)...) + binary.BigEndian.PutUint32(b[len(b)-4:], uint32(h.Version)) + b = append(b, uint8(h.DestConnectionID.Len())) + b = append(b, h.DestConnectionID.Bytes()...) + b = append(b, uint8(h.SrcConnectionID.Len())) + b = append(b, h.SrcConnectionID.Bytes()...) + + //nolint:exhaustive + switch h.Type { + case protocol.PacketTypeRetry: + b = append(b, h.Token...) + return b, nil + case protocol.PacketTypeInitial: + b = quicvarint.Append(b, uint64(len(h.Token))) + b = append(b, h.Token...) + } + b = quicvarint.AppendWithLen(b, uint64(h.Length), 2) + return appendPacketNumber(b, h.PacketNumber, h.PacketNumberLen) +} + +// ParsedLen returns the number of bytes that were consumed when parsing the header +func (h *ExtendedHeader) ParsedLen() protocol.ByteCount { + return h.parsedLen +} + +// GetLength determines the length of the Header. 
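Editorial aside: Append above packs the long-header first byte from three pieces: the fixed bits 0xc0, the version-dependent packet type in bits 4-5, and, for everything except Retry, the packet number length minus one in the low two bits. The same computation as a standalone sketch:

```go
package main

import "fmt"

func main() {
	const packetType = 0b00 // Initial, in QUIC v1's encoding
	pnLen := 2              // packet number encoded on 2 bytes

	firstByte := uint8(0xc0) | packetType<<4 | uint8(pnLen-1)
	fmt.Printf("%#08b\n", firstByte) // 0b11000001
}
```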
+func (h *ExtendedHeader) GetLength(_ protocol.Version) protocol.ByteCount { + length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */ + if h.Type == protocol.PacketTypeInitial { + length += protocol.ByteCount(quicvarint.Len(uint64(len(h.Token))) + len(h.Token)) + } + return length +} + +// Log logs the Header +func (h *ExtendedHeader) Log(logger utils.Logger) { + var token string + if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry { + if len(h.Token) == 0 { + token = "Token: (empty), " + } else { + token = fmt.Sprintf("Token: %#x, ", h.Token) + } + if h.Type == protocol.PacketTypeRetry { + logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version) + return + } + } + logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version) +} + +func appendPacketNumber(b []byte, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen) ([]byte, error) { + switch pnLen { + case protocol.PacketNumberLen1: + b = append(b, uint8(pn)) + case protocol.PacketNumberLen2: + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, uint16(pn)) + b = append(b, buf...) + case protocol.PacketNumberLen3: + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(pn)) + b = append(b, buf[1:]...) + case protocol.PacketNumberLen4: + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(pn)) + b = append(b, buf...) + default: + return nil, fmt.Errorf("invalid packet number length: %d", pnLen) + } + return b, nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go b/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go new file mode 100644 index 00000000..59d41444 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go @@ -0,0 +1,197 @@ +package wire + +import ( + "errors" + "fmt" + "io" + "reflect" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/quicvarint" +) + +const ( + pingFrameType = 0x1 + ackFrameType = 0x2 + ackECNFrameType = 0x3 + resetStreamFrameType = 0x4 + stopSendingFrameType = 0x5 + cryptoFrameType = 0x6 + newTokenFrameType = 0x7 + maxDataFrameType = 0x10 + maxStreamDataFrameType = 0x11 + bidiMaxStreamsFrameType = 0x12 + uniMaxStreamsFrameType = 0x13 + dataBlockedFrameType = 0x14 + streamDataBlockedFrameType = 0x15 + bidiStreamBlockedFrameType = 0x16 + uniStreamBlockedFrameType = 0x17 + newConnectionIDFrameType = 0x18 + retireConnectionIDFrameType = 0x19 + pathChallengeFrameType = 0x1a + pathResponseFrameType = 0x1b + connectionCloseFrameType = 0x1c + applicationCloseFrameType = 0x1d + handshakeDoneFrameType = 0x1e +) + +// The FrameParser parses QUIC frames, one by one. +type FrameParser struct { + ackDelayExponent uint8 + supportsDatagrams bool + + // To avoid allocating when parsing, keep a single ACK frame struct. + // It is used over and over again. + ackFrame *AckFrame +} + +// NewFrameParser creates a new frame parser. 
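Editorial aside: appendPacketNumber above writes truncated packet numbers by encoding into a 4-byte big-endian buffer and slicing off the unused high bytes (the PacketNumberLen3 case). The same trick, stdlib-only:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	pn := uint32(0x123456) // truncated packet number that fits in 3 bytes

	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, pn)
	fmt.Printf("% x\n", buf[1:]) // 12 34 56 — drop the zero high byte
}
```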
+func NewFrameParser(supportsDatagrams bool) *FrameParser { + return &FrameParser{ + supportsDatagrams: supportsDatagrams, + ackFrame: &AckFrame{}, + } +} + +// ParseNext parses the next frame. +// It skips PADDING frames. +func (p *FrameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel, v protocol.Version) (int, Frame, error) { + frame, l, err := p.parseNext(data, encLevel, v) + return l, frame, err +} + +func (p *FrameParser) parseNext(b []byte, encLevel protocol.EncryptionLevel, v protocol.Version) (Frame, int, error) { + var parsed int + for len(b) != 0 { + typ, l, err := quicvarint.Parse(b) + parsed += l + if err != nil { + return nil, parsed, &qerr.TransportError{ + ErrorCode: qerr.FrameEncodingError, + ErrorMessage: err.Error(), + } + } + b = b[l:] + if typ == 0x0 { // skip PADDING frames + continue + } + + f, l, err := p.parseFrame(b, typ, encLevel, v) + parsed += l + if err != nil { + return nil, parsed, &qerr.TransportError{ + FrameType: typ, + ErrorCode: qerr.FrameEncodingError, + ErrorMessage: err.Error(), + } + } + return f, parsed, nil + } + return nil, parsed, nil +} + +func (p *FrameParser) parseFrame(b []byte, typ uint64, encLevel protocol.EncryptionLevel, v protocol.Version) (Frame, int, error) { + var frame Frame + var err error + var l int + if typ&0xf8 == 0x8 { + frame, l, err = parseStreamFrame(b, typ, v) + } else { + switch typ { + case pingFrameType: + frame = &PingFrame{} + case ackFrameType, ackECNFrameType: + ackDelayExponent := p.ackDelayExponent + if encLevel != protocol.Encryption1RTT { + ackDelayExponent = protocol.DefaultAckDelayExponent + } + p.ackFrame.Reset() + l, err = parseAckFrame(p.ackFrame, b, typ, ackDelayExponent, v) + frame = p.ackFrame + case resetStreamFrameType: + frame, l, err = parseResetStreamFrame(b, v) + case stopSendingFrameType: + frame, l, err = parseStopSendingFrame(b, v) + case cryptoFrameType: + frame, l, err = parseCryptoFrame(b, v) + case newTokenFrameType: + frame, l, err = parseNewTokenFrame(b, v) + case maxDataFrameType: + frame, l, err = parseMaxDataFrame(b, v) + case maxStreamDataFrameType: + frame, l, err = parseMaxStreamDataFrame(b, v) + case bidiMaxStreamsFrameType, uniMaxStreamsFrameType: + frame, l, err = parseMaxStreamsFrame(b, typ, v) + case dataBlockedFrameType: + frame, l, err = parseDataBlockedFrame(b, v) + case streamDataBlockedFrameType: + frame, l, err = parseStreamDataBlockedFrame(b, v) + case bidiStreamBlockedFrameType, uniStreamBlockedFrameType: + frame, l, err = parseStreamsBlockedFrame(b, typ, v) + case newConnectionIDFrameType: + frame, l, err = parseNewConnectionIDFrame(b, v) + case retireConnectionIDFrameType: + frame, l, err = parseRetireConnectionIDFrame(b, v) + case pathChallengeFrameType: + frame, l, err = parsePathChallengeFrame(b, v) + case pathResponseFrameType: + frame, l, err = parsePathResponseFrame(b, v) + case connectionCloseFrameType, applicationCloseFrameType: + frame, l, err = parseConnectionCloseFrame(b, typ, v) + case handshakeDoneFrameType: + frame = &HandshakeDoneFrame{} + case 0x30, 0x31: + if p.supportsDatagrams { + frame, l, err = parseDatagramFrame(b, typ, v) + break + } + fallthrough + default: + err = errors.New("unknown frame type") + } + } + if err != nil { + return nil, 0, err + } + if !p.isAllowedAtEncLevel(frame, encLevel) { + return nil, l, fmt.Errorf("%s not allowed at encryption level %s", reflect.TypeOf(frame).Elem().Name(), encLevel) + } + return frame, l, nil +} + +func (p *FrameParser) isAllowedAtEncLevel(f Frame, encLevel protocol.EncryptionLevel) bool { 
+ switch encLevel { + case protocol.EncryptionInitial, protocol.EncryptionHandshake: + switch f.(type) { + case *CryptoFrame, *AckFrame, *ConnectionCloseFrame, *PingFrame: + return true + default: + return false + } + case protocol.Encryption0RTT: + switch f.(type) { + case *CryptoFrame, *AckFrame, *ConnectionCloseFrame, *NewTokenFrame, *PathResponseFrame, *RetireConnectionIDFrame: + return false + default: + return true + } + case protocol.Encryption1RTT: + return true + default: + panic("unknown encryption level") + } +} + +// SetAckDelayExponent sets the acknowledgment delay exponent (sent in the transport parameters). +// This value is used to scale the ACK Delay field in the ACK frame. +func (p *FrameParser) SetAckDelayExponent(exp uint8) { + p.ackDelayExponent = exp +} + +func replaceUnexpectedEOF(e error) error { + if e == io.ErrUnexpectedEOF { + return io.EOF + } + return e +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go new file mode 100644 index 00000000..85dd6474 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go @@ -0,0 +1,17 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" +) + +// A HandshakeDoneFrame is a HANDSHAKE_DONE frame +type HandshakeDoneFrame struct{} + +func (f *HandshakeDoneFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + return append(b, handshakeDoneFrameType), nil +} + +// Length of a written frame +func (f *HandshakeDoneFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/header.go b/vendor/github.com/quic-go/quic-go/internal/wire/header.go new file mode 100644 index 00000000..678a04a2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/header.go @@ -0,0 +1,302 @@ +package wire + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// ParseConnectionID parses the destination connection ID of a packet. +func ParseConnectionID(data []byte, shortHeaderConnIDLen int) (protocol.ConnectionID, error) { + if len(data) == 0 { + return protocol.ConnectionID{}, io.EOF + } + if !IsLongHeaderPacket(data[0]) { + if len(data) < shortHeaderConnIDLen+1 { + return protocol.ConnectionID{}, io.EOF + } + return protocol.ParseConnectionID(data[1 : 1+shortHeaderConnIDLen]), nil + } + if len(data) < 6 { + return protocol.ConnectionID{}, io.EOF + } + destConnIDLen := int(data[5]) + if destConnIDLen > protocol.MaxConnIDLen { + return protocol.ConnectionID{}, protocol.ErrInvalidConnectionIDLen + } + if len(data) < 6+destConnIDLen { + return protocol.ConnectionID{}, io.EOF + } + return protocol.ParseConnectionID(data[6 : 6+destConnIDLen]), nil +} + +// ParseArbitraryLenConnectionIDs parses the most general form of a Long Header packet, +// using only the version-independent packet format as described in Section 5.1 of RFC 8999: +// https://datatracker.ietf.org/doc/html/rfc8999#section-5.1. +// This function should only be called on Long Header packets for which we don't support the version. 
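Editorial aside: isAllowedAtEncLevel above gates frames per encryption level: Initial and Handshake packets may only carry CRYPTO, ACK, CONNECTION_CLOSE, and PING, while the 0-RTT branch rejects CRYPTO, ACK, CONNECTION_CLOSE, NEW_TOKEN, PATH_RESPONSE, and RETIRE_CONNECTION_ID and admits everything else. A hypothetical same-package sketch:

```go
// Hypothetical sketch, same package as FrameParser.
p := NewFrameParser(false)
fmt.Println(p.isAllowedAtEncLevel(&CryptoFrame{}, protocol.EncryptionInitial)) // true
fmt.Println(p.isAllowedAtEncLevel(&NewTokenFrame{}, protocol.Encryption0RTT))  // false
```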
+func ParseArbitraryLenConnectionIDs(data []byte) (bytesParsed int, dest, src protocol.ArbitraryLenConnectionID, _ error) { + startLen := len(data) + if len(data) < 6 { + return 0, nil, nil, io.EOF + } + data = data[5:] // skip first byte and version field + destConnIDLen := data[0] + data = data[1:] + destConnID := make(protocol.ArbitraryLenConnectionID, destConnIDLen) + if len(data) < int(destConnIDLen)+1 { + return 0, nil, nil, io.EOF + } + copy(destConnID, data) + data = data[destConnIDLen:] + srcConnIDLen := data[0] + data = data[1:] + if len(data) < int(srcConnIDLen) { + return 0, nil, nil, io.EOF + } + srcConnID := make(protocol.ArbitraryLenConnectionID, srcConnIDLen) + copy(srcConnID, data) + return startLen - len(data) + int(srcConnIDLen), destConnID, srcConnID, nil +} + +func IsPotentialQUICPacket(firstByte byte) bool { + return firstByte&0x40 > 0 +} + +// IsLongHeaderPacket says if this is a Long Header packet +func IsLongHeaderPacket(firstByte byte) bool { + return firstByte&0x80 > 0 +} + +// ParseVersion parses the QUIC version. +// It should only be called for Long Header packets (Short Header packets don't contain a version number). +func ParseVersion(data []byte) (protocol.Version, error) { + if len(data) < 5 { + return 0, io.EOF + } + return protocol.Version(binary.BigEndian.Uint32(data[1:5])), nil +} + +// IsVersionNegotiationPacket says if this is a version negotiation packet +func IsVersionNegotiationPacket(b []byte) bool { + if len(b) < 5 { + return false + } + return IsLongHeaderPacket(b[0]) && b[1] == 0 && b[2] == 0 && b[3] == 0 && b[4] == 0 +} + +// Is0RTTPacket says if this is a 0-RTT packet. +// A packet sent with a version we don't understand can never be a 0-RTT packet. +func Is0RTTPacket(b []byte) bool { + if len(b) < 5 { + return false + } + if !IsLongHeaderPacket(b[0]) { + return false + } + version := protocol.Version(binary.BigEndian.Uint32(b[1:5])) + //nolint:exhaustive // We only need to test QUIC versions that we support. + switch version { + case protocol.Version1: + return b[0]>>4&0b11 == 0b01 + case protocol.Version2: + return b[0]>>4&0b11 == 0b10 + default: + return false + } +} + +var ErrUnsupportedVersion = errors.New("unsupported version") + +// The Header is the version independent part of the header +type Header struct { + typeByte byte + Type protocol.PacketType + + Version protocol.Version + SrcConnectionID protocol.ConnectionID + DestConnectionID protocol.ConnectionID + + Length protocol.ByteCount + + Token []byte + + parsedLen protocol.ByteCount // how many bytes were read while parsing this header +} + +// ParsePacket parses a long header packet. +// The packet is cut according to the length field. +// If we understand the version, the packet is parsed up unto the packet number. +// Otherwise, only the invariant part of the header is parsed. 
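Editorial aside: the helpers above need only the first five bytes of a datagram to classify it: bit 7 of the first byte distinguishes long from short headers, and an all-zero version field marks a Version Negotiation packet. A standalone sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	pkt := []byte{0xc1, 0x00, 0x00, 0x00, 0x01} // flags byte + version (QUIC v1)

	isLong := pkt[0]&0x80 > 0
	version := binary.BigEndian.Uint32(pkt[1:5]) // 0 would mean Version Negotiation
	fmt.Printf("long header: %t, version: %#x\n", isLong, version)
}
```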
+func ParsePacket(data []byte) (*Header, []byte, []byte, error) { + if len(data) == 0 || !IsLongHeaderPacket(data[0]) { + return nil, nil, nil, errors.New("not a long header packet") + } + hdr, err := parseHeader(data) + if err != nil { + if errors.Is(err, ErrUnsupportedVersion) { + return hdr, nil, nil, err + } + return nil, nil, nil, err + } + if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length { + return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length) + } + packetLen := int(hdr.ParsedLen() + hdr.Length) + return hdr, data[:packetLen], data[packetLen:], nil +} + +// ParseHeader parses the header: +// * if we understand the version: up to the packet number +// * if not, only the invariant part of the header +func parseHeader(b []byte) (*Header, error) { + if len(b) == 0 { + return nil, io.EOF + } + typeByte := b[0] + + h := &Header{typeByte: typeByte} + l, err := h.parseLongHeader(b[1:]) + h.parsedLen = protocol.ByteCount(l) + 1 + return h, err +} + +func (h *Header) parseLongHeader(b []byte) (int, error) { + startLen := len(b) + if len(b) < 5 { + return 0, io.EOF + } + h.Version = protocol.Version(binary.BigEndian.Uint32(b[:4])) + if h.Version != 0 && h.typeByte&0x40 == 0 { + return startLen - len(b), errors.New("not a QUIC packet") + } + destConnIDLen := int(b[4]) + if destConnIDLen > protocol.MaxConnIDLen { + return startLen - len(b), protocol.ErrInvalidConnectionIDLen + } + b = b[5:] + if len(b) < destConnIDLen+1 { + return startLen - len(b), io.EOF + } + h.DestConnectionID = protocol.ParseConnectionID(b[:destConnIDLen]) + srcConnIDLen := int(b[destConnIDLen]) + if srcConnIDLen > protocol.MaxConnIDLen { + return startLen - len(b), protocol.ErrInvalidConnectionIDLen + } + b = b[destConnIDLen+1:] + if len(b) < srcConnIDLen { + return startLen - len(b), io.EOF + } + h.SrcConnectionID = protocol.ParseConnectionID(b[:srcConnIDLen]) + b = b[srcConnIDLen:] + if h.Version == 0 { // version negotiation packet + return startLen - len(b), nil + } + // If we don't understand the version, we have no idea how to interpret the rest of the bytes + if !protocol.IsSupportedVersion(protocol.SupportedVersions, h.Version) { + return startLen - len(b), ErrUnsupportedVersion + } + + if h.Version == protocol.Version2 { + switch h.typeByte >> 4 & 0b11 { + case 0b00: + h.Type = protocol.PacketTypeRetry + case 0b01: + h.Type = protocol.PacketTypeInitial + case 0b10: + h.Type = protocol.PacketType0RTT + case 0b11: + h.Type = protocol.PacketTypeHandshake + } + } else { + switch h.typeByte >> 4 & 0b11 { + case 0b00: + h.Type = protocol.PacketTypeInitial + case 0b01: + h.Type = protocol.PacketType0RTT + case 0b10: + h.Type = protocol.PacketTypeHandshake + case 0b11: + h.Type = protocol.PacketTypeRetry + } + } + + if h.Type == protocol.PacketTypeRetry { + tokenLen := len(b) - 16 + if tokenLen <= 0 { + return startLen - len(b), io.EOF + } + h.Token = make([]byte, tokenLen) + copy(h.Token, b[:tokenLen]) + return startLen - len(b) + tokenLen + 16, nil + } + + if h.Type == protocol.PacketTypeInitial { + tokenLen, n, err := quicvarint.Parse(b) + if err != nil { + return startLen - len(b), err + } + b = b[n:] + if tokenLen > uint64(len(b)) { + return startLen - len(b), io.EOF + } + h.Token = make([]byte, tokenLen) + copy(h.Token, b[:tokenLen]) + b = b[tokenLen:] + } + + pl, n, err := quicvarint.Parse(b) + if err != nil { + return 0, err + } + h.Length = protocol.ByteCount(pl) + return startLen - len(b) + n, nil +} + 
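Editorial aside: because ParsePacket above cuts exactly one packet off the front of the buffer (using the length field) and hands back the remainder, splitting a datagram that coalesces several long-header packets is just a loop. A hypothetical same-package sketch (wire is internal to quic-go, and `datagram` stands in for one received UDP payload):

```go
// Hypothetical sketch; datagram is one UDP payload, possibly several coalesced packets.
data := datagram
for len(data) > 0 && IsLongHeaderPacket(data[0]) {
	hdr, packet, rest, err := ParsePacket(data)
	if err != nil {
		break // e.g. ErrUnsupportedVersion, or a truncated packet
	}
	fmt.Printf("%s packet, %d bytes\n", hdr.PacketType(), len(packet))
	data = rest
}
```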
+// ParsedLen returns the number of bytes that were consumed when parsing the header +func (h *Header) ParsedLen() protocol.ByteCount { + return h.parsedLen +} + +// ParseExtended parses the version dependent part of the header. +// The Reader has to be set such that it points to the first byte of the header. +func (h *Header) ParseExtended(data []byte) (*ExtendedHeader, error) { + extHdr := h.toExtendedHeader() + reservedBitsValid, err := extHdr.parse(data) + if err != nil { + return nil, err + } + if !reservedBitsValid { + return extHdr, ErrInvalidReservedBits + } + return extHdr, nil +} + +func (h *Header) toExtendedHeader() *ExtendedHeader { + return &ExtendedHeader{Header: *h} +} + +// PacketType is the type of the packet, for logging purposes +func (h *Header) PacketType() string { + return h.Type.String() +} + +func readPacketNumber(data []byte, pnLen protocol.PacketNumberLen) (protocol.PacketNumber, error) { + var pn protocol.PacketNumber + switch pnLen { + case protocol.PacketNumberLen1: + pn = protocol.PacketNumber(data[0]) + case protocol.PacketNumberLen2: + pn = protocol.PacketNumber(binary.BigEndian.Uint16(data[:2])) + case protocol.PacketNumberLen3: + pn = protocol.PacketNumber(uint32(data[2]) + uint32(data[1])<<8 + uint32(data[0])<<16) + case protocol.PacketNumberLen4: + pn = protocol.PacketNumber(binary.BigEndian.Uint32(data[:4])) + default: + return 0, fmt.Errorf("invalid packet number length: %d", pnLen) + } + return pn, nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/interface.go b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go new file mode 100644 index 00000000..bc17883b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go @@ -0,0 +1,11 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" +) + +// A Frame in QUIC +type Frame interface { + Append(b []byte, version protocol.Version) ([]byte, error) + Length(version protocol.Version) protocol.ByteCount +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/log.go b/vendor/github.com/quic-go/quic-go/internal/wire/log.go new file mode 100644 index 00000000..c8b28d92 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/log.go @@ -0,0 +1,74 @@ +package wire + +import ( + "fmt" + "strings" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +// LogFrame logs a frame, either sent or received +func LogFrame(logger utils.Logger, frame Frame, sent bool) { + if !logger.Debug() { + return + } + dir := "<-" + if sent { + dir = "->" + } + switch f := frame.(type) { + case *CryptoFrame: + dataLen := protocol.ByteCount(len(f.Data)) + logger.Debugf("\t%s &wire.CryptoFrame{Offset: %d, Data length: %d, Offset + Data length: %d}", dir, f.Offset, dataLen, f.Offset+dataLen) + case *StreamFrame: + logger.Debugf("\t%s &wire.StreamFrame{StreamID: %d, Fin: %t, Offset: %d, Data length: %d, Offset + Data length: %d}", dir, f.StreamID, f.Fin, f.Offset, f.DataLen(), f.Offset+f.DataLen()) + case *ResetStreamFrame: + logger.Debugf("\t%s &wire.ResetStreamFrame{StreamID: %d, ErrorCode: %#x, FinalSize: %d}", dir, f.StreamID, f.ErrorCode, f.FinalSize) + case *AckFrame: + hasECN := f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0 + var ecn string + if hasECN { + ecn = fmt.Sprintf(", ECT0: %d, ECT1: %d, CE: %d", f.ECT0, f.ECT1, f.ECNCE) + } + if len(f.AckRanges) > 1 { + ackRanges := make([]string, len(f.AckRanges)) + for i, r := range f.AckRanges { + ackRanges[i] = fmt.Sprintf("{Largest: %d, Smallest: %d}", 
r.Largest, r.Smallest) + } + logger.Debugf("\t%s &wire.AckFrame{LargestAcked: %d, LowestAcked: %d, AckRanges: {%s}, DelayTime: %s%s}", dir, f.LargestAcked(), f.LowestAcked(), strings.Join(ackRanges, ", "), f.DelayTime.String(), ecn) + } else { + logger.Debugf("\t%s &wire.AckFrame{LargestAcked: %d, LowestAcked: %d, DelayTime: %s%s}", dir, f.LargestAcked(), f.LowestAcked(), f.DelayTime.String(), ecn) + } + case *MaxDataFrame: + logger.Debugf("\t%s &wire.MaxDataFrame{MaximumData: %d}", dir, f.MaximumData) + case *MaxStreamDataFrame: + logger.Debugf("\t%s &wire.MaxStreamDataFrame{StreamID: %d, MaximumStreamData: %d}", dir, f.StreamID, f.MaximumStreamData) + case *DataBlockedFrame: + logger.Debugf("\t%s &wire.DataBlockedFrame{MaximumData: %d}", dir, f.MaximumData) + case *StreamDataBlockedFrame: + logger.Debugf("\t%s &wire.StreamDataBlockedFrame{StreamID: %d, MaximumStreamData: %d}", dir, f.StreamID, f.MaximumStreamData) + case *MaxStreamsFrame: + switch f.Type { + case protocol.StreamTypeUni: + logger.Debugf("\t%s &wire.MaxStreamsFrame{Type: uni, MaxStreamNum: %d}", dir, f.MaxStreamNum) + case protocol.StreamTypeBidi: + logger.Debugf("\t%s &wire.MaxStreamsFrame{Type: bidi, MaxStreamNum: %d}", dir, f.MaxStreamNum) + } + case *StreamsBlockedFrame: + switch f.Type { + case protocol.StreamTypeUni: + logger.Debugf("\t%s &wire.StreamsBlockedFrame{Type: uni, MaxStreams: %d}", dir, f.StreamLimit) + case protocol.StreamTypeBidi: + logger.Debugf("\t%s &wire.StreamsBlockedFrame{Type: bidi, MaxStreams: %d}", dir, f.StreamLimit) + } + case *NewConnectionIDFrame: + logger.Debugf("\t%s &wire.NewConnectionIDFrame{SequenceNumber: %d, RetirePriorTo: %d, ConnectionID: %s, StatelessResetToken: %#x}", dir, f.SequenceNumber, f.RetirePriorTo, f.ConnectionID, f.StatelessResetToken) + case *RetireConnectionIDFrame: + logger.Debugf("\t%s &wire.RetireConnectionIDFrame{SequenceNumber: %d}", dir, f.SequenceNumber) + case *NewTokenFrame: + logger.Debugf("\t%s &wire.NewTokenFrame{Token: %#x}", dir, f.Token) + default: + logger.Debugf("\t%s %#v", dir, frame) + } +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go new file mode 100644 index 00000000..5819c027 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go @@ -0,0 +1,33 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A MaxDataFrame carries flow control information for the connection +type MaxDataFrame struct { + MaximumData protocol.ByteCount +} + +// parseMaxDataFrame parses a MAX_DATA frame +func parseMaxDataFrame(b []byte, _ protocol.Version) (*MaxDataFrame, int, error) { + frame := &MaxDataFrame{} + byteOffset, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + frame.MaximumData = protocol.ByteCount(byteOffset) + return frame, l, nil +} + +func (f *MaxDataFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, maxDataFrameType) + b = quicvarint.Append(b, uint64(f.MaximumData)) + return b, nil +} + +// Length of a written frame +func (f *MaxDataFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.MaximumData))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go new file mode 100644 index 00000000..db9091af --- /dev/null +++ 
b/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go @@ -0,0 +1,43 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A MaxStreamDataFrame is a MAX_STREAM_DATA frame +type MaxStreamDataFrame struct { + StreamID protocol.StreamID + MaximumStreamData protocol.ByteCount +} + +func parseMaxStreamDataFrame(b []byte, _ protocol.Version) (*MaxStreamDataFrame, int, error) { + startLen := len(b) + sid, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + offset, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + + return &MaxStreamDataFrame{ + StreamID: protocol.StreamID(sid), + MaximumStreamData: protocol.ByteCount(offset), + }, startLen - len(b), nil +} + +func (f *MaxStreamDataFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, maxStreamDataFrameType) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.MaximumStreamData)) + return b, nil +} + +// Length of a written frame +func (f *MaxStreamDataFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.StreamID))+quicvarint.Len(uint64(f.MaximumStreamData))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go new file mode 100644 index 00000000..a8745bd1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go @@ -0,0 +1,49 @@ +package wire + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A MaxStreamsFrame is a MAX_STREAMS frame +type MaxStreamsFrame struct { + Type protocol.StreamType + MaxStreamNum protocol.StreamNum +} + +func parseMaxStreamsFrame(b []byte, typ uint64, _ protocol.Version) (*MaxStreamsFrame, int, error) { + f := &MaxStreamsFrame{} + switch typ { + case bidiMaxStreamsFrameType: + f.Type = protocol.StreamTypeBidi + case uniMaxStreamsFrameType: + f.Type = protocol.StreamTypeUni + } + streamID, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + f.MaxStreamNum = protocol.StreamNum(streamID) + if f.MaxStreamNum > protocol.MaxStreamCount { + return nil, 0, fmt.Errorf("%d exceeds the maximum stream count", f.MaxStreamNum) + } + return f, l, nil +} + +func (f *MaxStreamsFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + switch f.Type { + case protocol.StreamTypeBidi: + b = append(b, bidiMaxStreamsFrameType) + case protocol.StreamTypeUni: + b = append(b, uniMaxStreamsFrameType) + } + b = quicvarint.Append(b, uint64(f.MaxStreamNum)) + return b, nil +} + +// Length of a written frame +func (f *MaxStreamsFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.MaxStreamNum))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go new file mode 100644 index 00000000..852d46ef --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go @@ -0,0 +1,80 @@ +package wire + +import ( + "errors" + "fmt" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A NewConnectionIDFrame is a NEW_CONNECTION_ID frame +type 
NewConnectionIDFrame struct { + SequenceNumber uint64 + RetirePriorTo uint64 + ConnectionID protocol.ConnectionID + StatelessResetToken protocol.StatelessResetToken +} + +func parseNewConnectionIDFrame(b []byte, _ protocol.Version) (*NewConnectionIDFrame, int, error) { + startLen := len(b) + seq, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + ret, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + if ret > seq { + //nolint:stylecheck + return nil, 0, fmt.Errorf("Retire Prior To value (%d) larger than Sequence Number (%d)", ret, seq) + } + if len(b) == 0 { + return nil, 0, io.EOF + } + connIDLen := int(b[0]) + b = b[1:] + if connIDLen == 0 { + return nil, 0, errors.New("invalid zero-length connection ID") + } + if connIDLen > protocol.MaxConnIDLen { + return nil, 0, protocol.ErrInvalidConnectionIDLen + } + if len(b) < connIDLen { + return nil, 0, io.EOF + } + frame := &NewConnectionIDFrame{ + SequenceNumber: seq, + RetirePriorTo: ret, + ConnectionID: protocol.ParseConnectionID(b[:connIDLen]), + } + b = b[connIDLen:] + if len(b) < len(frame.StatelessResetToken) { + return nil, 0, io.EOF + } + copy(frame.StatelessResetToken[:], b) + return frame, startLen - len(b) + len(frame.StatelessResetToken), nil +} + +func (f *NewConnectionIDFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, newConnectionIDFrameType) + b = quicvarint.Append(b, f.SequenceNumber) + b = quicvarint.Append(b, f.RetirePriorTo) + connIDLen := f.ConnectionID.Len() + if connIDLen > protocol.MaxConnIDLen { + return nil, fmt.Errorf("invalid connection ID length: %d", connIDLen) + } + b = append(b, uint8(connIDLen)) + b = append(b, f.ConnectionID.Bytes()...) + b = append(b, f.StatelessResetToken[:]...) + return b, nil +} + +// Length of a written frame +func (f *NewConnectionIDFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(f.SequenceNumber)+quicvarint.Len(f.RetirePriorTo)+1 /* connection ID length */ +f.ConnectionID.Len()) + 16 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go new file mode 100644 index 00000000..f1d4d00f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go @@ -0,0 +1,43 @@ +package wire + +import ( + "errors" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A NewTokenFrame is a NEW_TOKEN frame +type NewTokenFrame struct { + Token []byte +} + +func parseNewTokenFrame(b []byte, _ protocol.Version) (*NewTokenFrame, int, error) { + tokenLen, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + if tokenLen == 0 { + return nil, 0, errors.New("token must not be empty") + } + if uint64(len(b)) < tokenLen { + return nil, 0, io.EOF + } + token := make([]byte, int(tokenLen)) + copy(token, b) + return &NewTokenFrame{Token: token}, l + int(tokenLen), nil +} + +func (f *NewTokenFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, newTokenFrameType) + b = quicvarint.Append(b, uint64(len(f.Token))) + b = append(b, f.Token...) 
+ return b, nil +} + +// Length of a written frame +func (f *NewTokenFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(len(f.Token)))+len(f.Token)) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go new file mode 100644 index 00000000..2aca989f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go @@ -0,0 +1,32 @@ +package wire + +import ( + "io" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// A PathChallengeFrame is a PATH_CHALLENGE frame +type PathChallengeFrame struct { + Data [8]byte +} + +func parsePathChallengeFrame(b []byte, _ protocol.Version) (*PathChallengeFrame, int, error) { + f := &PathChallengeFrame{} + if len(b) < 8 { + return nil, 0, io.EOF + } + copy(f.Data[:], b) + return f, 8, nil +} + +func (f *PathChallengeFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, pathChallengeFrameType) + b = append(b, f.Data[:]...) + return b, nil +} + +// Length of a written frame +func (f *PathChallengeFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 + 8 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go new file mode 100644 index 00000000..76532c85 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go @@ -0,0 +1,32 @@ +package wire + +import ( + "io" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// A PathResponseFrame is a PATH_RESPONSE frame +type PathResponseFrame struct { + Data [8]byte +} + +func parsePathResponseFrame(b []byte, _ protocol.Version) (*PathResponseFrame, int, error) { + f := &PathResponseFrame{} + if len(b) < 8 { + return nil, 0, io.EOF + } + copy(f.Data[:], b) + return f, 8, nil +} + +func (f *PathResponseFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, pathResponseFrameType) + b = append(b, f.Data[:]...) 
+ return b, nil +} + +// Length of a written frame +func (f *PathResponseFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 + 8 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go new file mode 100644 index 00000000..71f8d16c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go @@ -0,0 +1,17 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" +) + +// A PingFrame is a PING frame +type PingFrame struct{} + +func (f *PingFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + return append(b, pingFrameType), nil +} + +// Length of a written frame +func (f *PingFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/pool.go b/vendor/github.com/quic-go/quic-go/internal/wire/pool.go new file mode 100644 index 00000000..18ab4379 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/pool.go @@ -0,0 +1,33 @@ +package wire + +import ( + "sync" + + "github.com/quic-go/quic-go/internal/protocol" +) + +var pool sync.Pool + +func init() { + pool.New = func() interface{} { + return &StreamFrame{ + Data: make([]byte, 0, protocol.MaxPacketBufferSize), + fromPool: true, + } + } +} + +func GetStreamFrame() *StreamFrame { + f := pool.Get().(*StreamFrame) + return f +} + +func putStreamFrame(f *StreamFrame) { + if !f.fromPool { + return + } + if protocol.ByteCount(cap(f.Data)) != protocol.MaxPacketBufferSize { + panic("wire.PutStreamFrame called with packet of wrong size!") + } + pool.Put(f) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go new file mode 100644 index 00000000..a20029af --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go @@ -0,0 +1,55 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/quicvarint" +) + +// A ResetStreamFrame is a RESET_STREAM frame in QUIC +type ResetStreamFrame struct { + StreamID protocol.StreamID + ErrorCode qerr.StreamErrorCode + FinalSize protocol.ByteCount +} + +func parseResetStreamFrame(b []byte, _ protocol.Version) (*ResetStreamFrame, int, error) { + startLen := len(b) + var streamID protocol.StreamID + var byteOffset protocol.ByteCount + sid, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + streamID = protocol.StreamID(sid) + errorCode, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + bo, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + byteOffset = protocol.ByteCount(bo) + + return &ResetStreamFrame{ + StreamID: streamID, + ErrorCode: qerr.StreamErrorCode(errorCode), + FinalSize: byteOffset, + }, startLen - len(b) + l, nil +} + +func (f *ResetStreamFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, resetStreamFrameType) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.ErrorCode)) + b = quicvarint.Append(b, uint64(f.FinalSize)) + return b, nil +} + +// Length of a written frame +func (f *ResetStreamFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + 
protocol.ByteCount(quicvarint.Len(uint64(f.StreamID))+quicvarint.Len(uint64(f.ErrorCode))+quicvarint.Len(uint64(f.FinalSize))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go new file mode 100644 index 00000000..27aeff84 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go @@ -0,0 +1,30 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A RetireConnectionIDFrame is a RETIRE_CONNECTION_ID frame +type RetireConnectionIDFrame struct { + SequenceNumber uint64 +} + +func parseRetireConnectionIDFrame(b []byte, _ protocol.Version) (*RetireConnectionIDFrame, int, error) { + seq, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + return &RetireConnectionIDFrame{SequenceNumber: seq}, l, nil +} + +func (f *RetireConnectionIDFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, retireConnectionIDFrameType) + b = quicvarint.Append(b, f.SequenceNumber) + return b, nil +} + +// Length of a written frame +func (f *RetireConnectionIDFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(f.SequenceNumber)) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go new file mode 100644 index 00000000..cf2889c5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go @@ -0,0 +1,62 @@ +package wire + +import ( + "errors" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +// ParseShortHeader parses a short header packet. +// It must be called after header protection was removed. +// Otherwise, the check for the reserved bits will (most likely) fail. +func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.PacketNumber, _ protocol.PacketNumberLen, _ protocol.KeyPhaseBit, _ error) { + if len(data) == 0 { + return 0, 0, 0, 0, io.EOF + } + if data[0]&0x80 > 0 { + return 0, 0, 0, 0, errors.New("not a short header packet") + } + if data[0]&0x40 == 0 { + return 0, 0, 0, 0, errors.New("not a QUIC packet") + } + pnLen := protocol.PacketNumberLen(data[0]&0b11) + 1 + if len(data) < 1+int(pnLen)+connIDLen { + return 0, 0, 0, 0, io.EOF + } + + pos := 1 + connIDLen + pn, err := readPacketNumber(data[pos:], pnLen) + if err != nil { + return 0, 0, 0, 0, err + } + kp := protocol.KeyPhaseZero + if data[0]&0b100 > 0 { + kp = protocol.KeyPhaseOne + } + + if data[0]&0x18 != 0 { + err = ErrInvalidReservedBits + } + return 1 + connIDLen + int(pnLen), pn, pnLen, kp, err +} + +// AppendShortHeader writes a short header. +func AppendShortHeader(b []byte, connID protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) ([]byte, error) { + typeByte := 0x40 | uint8(pnLen-1) + if kp == protocol.KeyPhaseOne { + typeByte |= byte(1 << 2) + } + b = append(b, typeByte) + b = append(b, connID.Bytes()...) 
+ return appendPacketNumber(b, pn, pnLen) +} + +func ShortHeaderLen(dest protocol.ConnectionID, pnLen protocol.PacketNumberLen) protocol.ByteCount { + return 1 + protocol.ByteCount(dest.Len()) + protocol.ByteCount(pnLen) +} + +func LogShortHeader(logger utils.Logger, dest protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) { + logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", dest, pn, pnLen, kp) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go new file mode 100644 index 00000000..a2326f8e --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go @@ -0,0 +1,45 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/quicvarint" +) + +// A StopSendingFrame is a STOP_SENDING frame +type StopSendingFrame struct { + StreamID protocol.StreamID + ErrorCode qerr.StreamErrorCode +} + +// parseStopSendingFrame parses a STOP_SENDING frame +func parseStopSendingFrame(b []byte, _ protocol.Version) (*StopSendingFrame, int, error) { + startLen := len(b) + streamID, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + errorCode, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + + return &StopSendingFrame{ + StreamID: protocol.StreamID(streamID), + ErrorCode: qerr.StreamErrorCode(errorCode), + }, startLen - len(b), nil +} + +// Length of a written frame +func (f *StopSendingFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.StreamID))+quicvarint.Len(uint64(f.ErrorCode))) +} + +func (f *StopSendingFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, stopSendingFrameType) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.ErrorCode)) + return b, nil +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go new file mode 100644 index 00000000..3762ec76 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go @@ -0,0 +1,42 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A StreamDataBlockedFrame is a STREAM_DATA_BLOCKED frame +type StreamDataBlockedFrame struct { + StreamID protocol.StreamID + MaximumStreamData protocol.ByteCount +} + +func parseStreamDataBlockedFrame(b []byte, _ protocol.Version) (*StreamDataBlockedFrame, int, error) { + startLen := len(b) + sid, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + offset, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + + return &StreamDataBlockedFrame{ + StreamID: protocol.StreamID(sid), + MaximumStreamData: protocol.ByteCount(offset), + }, startLen - len(b) + l, nil +} + +func (f *StreamDataBlockedFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + b = append(b, 0x15) + b = quicvarint.Append(b, uint64(f.StreamID)) + b = quicvarint.Append(b, uint64(f.MaximumStreamData)) + return b, nil +} + +// Length of a written frame +func (f 
*StreamDataBlockedFrame) Length(protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.StreamID))+quicvarint.Len(uint64(f.MaximumStreamData))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go new file mode 100644 index 00000000..f9470ecd --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go @@ -0,0 +1,188 @@ +package wire + +import ( + "errors" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A StreamFrame of QUIC +type StreamFrame struct { + StreamID protocol.StreamID + Offset protocol.ByteCount + Data []byte + Fin bool + DataLenPresent bool + + fromPool bool +} + +func parseStreamFrame(b []byte, typ uint64, _ protocol.Version) (*StreamFrame, int, error) { + startLen := len(b) + hasOffset := typ&0b100 > 0 + fin := typ&0b1 > 0 + hasDataLen := typ&0b10 > 0 + + streamID, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + var offset uint64 + if hasOffset { + offset, l, err = quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + } + + var dataLen uint64 + if hasDataLen { + var err error + var l int + dataLen, l, err = quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + b = b[l:] + if dataLen > uint64(len(b)) { + return nil, 0, io.EOF + } + } else { + // The rest of the packet is data + dataLen = uint64(len(b)) + } + + var frame *StreamFrame + if dataLen < protocol.MinStreamFrameBufferSize { + frame = &StreamFrame{Data: make([]byte, dataLen)} + } else { + frame = GetStreamFrame() + // The STREAM frame can't be larger than the StreamFrame we obtained from the buffer, + // since those StreamFrames have a buffer length of the maximum packet size. + if dataLen > uint64(cap(frame.Data)) { + return nil, 0, io.EOF + } + frame.Data = frame.Data[:dataLen] + } + + frame.StreamID = protocol.StreamID(streamID) + frame.Offset = protocol.ByteCount(offset) + frame.Fin = fin + frame.DataLenPresent = hasDataLen + + if dataLen != 0 { + copy(frame.Data, b) + } + if frame.Offset+frame.DataLen() > protocol.MaxByteCount { + return nil, 0, errors.New("stream data overflows maximum offset") + } + return frame, startLen - len(b) + int(dataLen), nil +} + +func (f *StreamFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + if len(f.Data) == 0 && !f.Fin { + return nil, errors.New("StreamFrame: attempting to write empty frame without FIN") + } + + typ := byte(0x8) + if f.Fin { + typ ^= 0b1 + } + hasOffset := f.Offset != 0 + if f.DataLenPresent { + typ ^= 0b10 + } + if hasOffset { + typ ^= 0b100 + } + b = append(b, typ) + b = quicvarint.Append(b, uint64(f.StreamID)) + if hasOffset { + b = quicvarint.Append(b, uint64(f.Offset)) + } + if f.DataLenPresent { + b = quicvarint.Append(b, uint64(f.DataLen())) + } + b = append(b, f.Data...) 
+ return b, nil +} + +// Length returns the total length of the STREAM frame +func (f *StreamFrame) Length(protocol.Version) protocol.ByteCount { + length := 1 + quicvarint.Len(uint64(f.StreamID)) + if f.Offset != 0 { + length += quicvarint.Len(uint64(f.Offset)) + } + if f.DataLenPresent { + length += quicvarint.Len(uint64(f.DataLen())) + } + return protocol.ByteCount(length) + f.DataLen() +} + +// DataLen gives the length of data in bytes +func (f *StreamFrame) DataLen() protocol.ByteCount { + return protocol.ByteCount(len(f.Data)) +} + +// MaxDataLen returns the maximum data length +// If 0 is returned, writing will fail (a STREAM frame must contain at least 1 byte of data). +func (f *StreamFrame) MaxDataLen(maxSize protocol.ByteCount, _ protocol.Version) protocol.ByteCount { + headerLen := 1 + protocol.ByteCount(quicvarint.Len(uint64(f.StreamID))) + if f.Offset != 0 { + headerLen += protocol.ByteCount(quicvarint.Len(uint64(f.Offset))) + } + if f.DataLenPresent { + // Pretend that the data size will be 1 byte. + // If it turns out that varint encoding the length will consume 2 bytes, we need to adjust the data length afterward + headerLen++ + } + if headerLen > maxSize { + return 0 + } + maxDataLen := maxSize - headerLen + if f.DataLenPresent && quicvarint.Len(uint64(maxDataLen)) != 1 { + maxDataLen-- + } + return maxDataLen +} + +// MaybeSplitOffFrame splits a frame such that it is not bigger than maxSize bytes. +// It returns whether the frame was actually split. +// The frame might not be split if: +// * the size is large enough to fit the whole frame +// * the size is too small to fit even a 1-byte frame. In that case, the frame returned is nil. +func (f *StreamFrame) MaybeSplitOffFrame(maxSize protocol.ByteCount, version protocol.Version) (*StreamFrame, bool /* was splitting required */) { + if maxSize >= f.Length(version) { + return nil, false + } + + n := f.MaxDataLen(maxSize, version) + if n == 0 { + return nil, true + } + + new := GetStreamFrame() + new.StreamID = f.StreamID + new.Offset = f.Offset + new.Fin = false + new.DataLenPresent = f.DataLenPresent + + // swap the data slices + new.Data, f.Data = f.Data, new.Data + new.fromPool, f.fromPool = f.fromPool, new.fromPool + + f.Data = f.Data[:protocol.ByteCount(len(new.Data))-n] + copy(f.Data, new.Data[n:]) + new.Data = new.Data[:n] + f.Offset += n + + return new, true +} + +func (f *StreamFrame) PutBack() { + putStreamFrame(f) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go new file mode 100644 index 00000000..c946fec3 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go @@ -0,0 +1,49 @@ +package wire + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// A StreamsBlockedFrame is a STREAMS_BLOCKED frame +type StreamsBlockedFrame struct { + Type protocol.StreamType + StreamLimit protocol.StreamNum +} + +func parseStreamsBlockedFrame(b []byte, typ uint64, _ protocol.Version) (*StreamsBlockedFrame, int, error) { + f := &StreamsBlockedFrame{} + switch typ { + case bidiStreamBlockedFrameType: + f.Type = protocol.StreamTypeBidi + case uniStreamBlockedFrameType: + f.Type = protocol.StreamTypeUni + } + streamLimit, l, err := quicvarint.Parse(b) + if err != nil { + return nil, 0, replaceUnexpectedEOF(err) + } + f.StreamLimit = protocol.StreamNum(streamLimit) + if f.StreamLimit > protocol.MaxStreamCount { + return nil, 0,
fmt.Errorf("%d exceeds the maximum stream count", f.StreamLimit) + } + return f, l, nil +} + +func (f *StreamsBlockedFrame) Append(b []byte, _ protocol.Version) ([]byte, error) { + switch f.Type { + case protocol.StreamTypeBidi: + b = append(b, bidiStreamBlockedFrameType) + case protocol.StreamTypeUni: + b = append(b, uniStreamBlockedFrameType) + } + b = quicvarint.Append(b, uint64(f.StreamLimit)) + return b, nil +} + +// Length of a written frame +func (f *StreamsBlockedFrame) Length(_ protocol.Version) protocol.ByteCount { + return 1 + protocol.ByteCount(quicvarint.Len(uint64(f.StreamLimit))) +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go new file mode 100644 index 00000000..cee74b8f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go @@ -0,0 +1,532 @@ +package wire + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "net/netip" + "slices" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/quicvarint" +) + +// AdditionalTransportParametersClient are additional transport parameters that will be added +// to the client's transport parameters. +// This is not intended for production use, but _only_ to increase the size of the ClientHello beyond +// the usual size of less than 1 MTU. +var AdditionalTransportParametersClient map[uint64][]byte + +const transportParameterMarshalingVersion = 1 + +type transportParameterID uint64 + +const ( + originalDestinationConnectionIDParameterID transportParameterID = 0x0 + maxIdleTimeoutParameterID transportParameterID = 0x1 + statelessResetTokenParameterID transportParameterID = 0x2 + maxUDPPayloadSizeParameterID transportParameterID = 0x3 + initialMaxDataParameterID transportParameterID = 0x4 + initialMaxStreamDataBidiLocalParameterID transportParameterID = 0x5 + initialMaxStreamDataBidiRemoteParameterID transportParameterID = 0x6 + initialMaxStreamDataUniParameterID transportParameterID = 0x7 + initialMaxStreamsBidiParameterID transportParameterID = 0x8 + initialMaxStreamsUniParameterID transportParameterID = 0x9 + ackDelayExponentParameterID transportParameterID = 0xa + maxAckDelayParameterID transportParameterID = 0xb + disableActiveMigrationParameterID transportParameterID = 0xc + preferredAddressParameterID transportParameterID = 0xd + activeConnectionIDLimitParameterID transportParameterID = 0xe + initialSourceConnectionIDParameterID transportParameterID = 0xf + retrySourceConnectionIDParameterID transportParameterID = 0x10 + // RFC 9221 + maxDatagramFrameSizeParameterID transportParameterID = 0x20 +) + +// PreferredAddress is the value encoding in the preferred_address transport parameter +type PreferredAddress struct { + IPv4, IPv6 netip.AddrPort + ConnectionID protocol.ConnectionID + StatelessResetToken protocol.StatelessResetToken +} + +// TransportParameters are parameters sent to the peer during the handshake +type TransportParameters struct { + InitialMaxStreamDataBidiLocal protocol.ByteCount + InitialMaxStreamDataBidiRemote protocol.ByteCount + InitialMaxStreamDataUni protocol.ByteCount + InitialMaxData protocol.ByteCount + + MaxAckDelay time.Duration + AckDelayExponent uint8 + + DisableActiveMigration bool + + MaxUDPPayloadSize protocol.ByteCount + + MaxUniStreamNum protocol.StreamNum + MaxBidiStreamNum protocol.StreamNum + + MaxIdleTimeout time.Duration + + PreferredAddress 
*PreferredAddress + + OriginalDestinationConnectionID protocol.ConnectionID + InitialSourceConnectionID protocol.ConnectionID + RetrySourceConnectionID *protocol.ConnectionID // use a pointer here to distinguish zero-length connection IDs from missing transport parameters + + StatelessResetToken *protocol.StatelessResetToken + ActiveConnectionIDLimit uint64 + + MaxDatagramFrameSize protocol.ByteCount +} + +// Unmarshal the transport parameters +func (p *TransportParameters) Unmarshal(data []byte, sentBy protocol.Perspective) error { + if err := p.unmarshal(data, sentBy, false); err != nil { + return &qerr.TransportError{ + ErrorCode: qerr.TransportParameterError, + ErrorMessage: err.Error(), + } + } + return nil +} + +func (p *TransportParameters) unmarshal(b []byte, sentBy protocol.Perspective, fromSessionTicket bool) error { + // needed to check that every parameter is only sent at most once + parameterIDs := make([]transportParameterID, 0, 32) + + var ( + readOriginalDestinationConnectionID bool + readInitialSourceConnectionID bool + readActiveConnectionIDLimit bool + ) + + p.AckDelayExponent = protocol.DefaultAckDelayExponent + p.MaxAckDelay = protocol.DefaultMaxAckDelay + p.MaxDatagramFrameSize = protocol.InvalidByteCount + + for len(b) > 0 { + paramIDInt, l, err := quicvarint.Parse(b) + if err != nil { + return err + } + paramID := transportParameterID(paramIDInt) + b = b[l:] + paramLen, l, err := quicvarint.Parse(b) + if err != nil { + return err + } + b = b[l:] + if uint64(len(b)) < paramLen { + return fmt.Errorf("remaining length (%d) smaller than parameter length (%d)", len(b), paramLen) + } + parameterIDs = append(parameterIDs, paramID) + switch paramID { + case activeConnectionIDLimitParameterID: + readActiveConnectionIDLimit = true + fallthrough + case maxIdleTimeoutParameterID, + maxUDPPayloadSizeParameterID, + initialMaxDataParameterID, + initialMaxStreamDataBidiLocalParameterID, + initialMaxStreamDataBidiRemoteParameterID, + initialMaxStreamDataUniParameterID, + initialMaxStreamsBidiParameterID, + initialMaxStreamsUniParameterID, + maxAckDelayParameterID, + maxDatagramFrameSizeParameterID, + ackDelayExponentParameterID: + if err := p.readNumericTransportParameter(b, paramID, int(paramLen)); err != nil { + return err + } + b = b[paramLen:] + case preferredAddressParameterID: + if sentBy == protocol.PerspectiveClient { + return errors.New("client sent a preferred_address") + } + if err := p.readPreferredAddress(b, int(paramLen)); err != nil { + return err + } + b = b[paramLen:] + case disableActiveMigrationParameterID: + if paramLen != 0 { + return fmt.Errorf("wrong length for disable_active_migration: %d (expected empty)", paramLen) + } + p.DisableActiveMigration = true + case statelessResetTokenParameterID: + if sentBy == protocol.PerspectiveClient { + return errors.New("client sent a stateless_reset_token") + } + if paramLen != 16 { + return fmt.Errorf("wrong length for stateless_reset_token: %d (expected 16)", paramLen) + } + var token protocol.StatelessResetToken + if len(b) < len(token) { + return io.EOF + } + copy(token[:], b) + b = b[len(token):] + p.StatelessResetToken = &token + case originalDestinationConnectionIDParameterID: + if sentBy == protocol.PerspectiveClient { + return errors.New("client sent an original_destination_connection_id") + } + if paramLen > protocol.MaxConnIDLen { + return protocol.ErrInvalidConnectionIDLen + } + p.OriginalDestinationConnectionID = protocol.ParseConnectionID(b[:paramLen]) + b = b[paramLen:] + readOriginalDestinationConnectionID 
= true + case initialSourceConnectionIDParameterID: + if paramLen > protocol.MaxConnIDLen { + return protocol.ErrInvalidConnectionIDLen + } + p.InitialSourceConnectionID = protocol.ParseConnectionID(b[:paramLen]) + b = b[paramLen:] + readInitialSourceConnectionID = true + case retrySourceConnectionIDParameterID: + if sentBy == protocol.PerspectiveClient { + return errors.New("client sent a retry_source_connection_id") + } + if paramLen > protocol.MaxConnIDLen { + return protocol.ErrInvalidConnectionIDLen + } + connID := protocol.ParseConnectionID(b[:paramLen]) + b = b[paramLen:] + p.RetrySourceConnectionID = &connID + default: + b = b[paramLen:] + } + } + + if !readActiveConnectionIDLimit { + p.ActiveConnectionIDLimit = protocol.DefaultActiveConnectionIDLimit + } + if !fromSessionTicket { + if sentBy == protocol.PerspectiveServer && !readOriginalDestinationConnectionID { + return errors.New("missing original_destination_connection_id") + } + if p.MaxUDPPayloadSize == 0 { + p.MaxUDPPayloadSize = protocol.MaxByteCount + } + if !readInitialSourceConnectionID { + return errors.New("missing initial_source_connection_id") + } + } + + // check that every transport parameter was sent at most once + slices.SortFunc(parameterIDs, func(a, b transportParameterID) int { + if a < b { + return -1 + } + return 1 + }) + for i := 0; i < len(parameterIDs)-1; i++ { + if parameterIDs[i] == parameterIDs[i+1] { + return fmt.Errorf("received duplicate transport parameter %#x", parameterIDs[i]) + } + } + + return nil +} + +func (p *TransportParameters) readPreferredAddress(b []byte, expectedLen int) error { + remainingLen := len(b) + pa := &PreferredAddress{} + if len(b) < 4+2+16+2+1 { + return io.EOF + } + var ipv4 [4]byte + copy(ipv4[:], b[:4]) + port4 := binary.BigEndian.Uint16(b[4:]) + b = b[4+2:] + pa.IPv4 = netip.AddrPortFrom(netip.AddrFrom4(ipv4), port4) + var ipv6 [16]byte + copy(ipv6[:], b[:16]) + port6 := binary.BigEndian.Uint16(b[16:]) + pa.IPv6 = netip.AddrPortFrom(netip.AddrFrom16(ipv6), port6) + b = b[16+2:] + connIDLen := int(b[0]) + b = b[1:] + if connIDLen == 0 || connIDLen > protocol.MaxConnIDLen { + return fmt.Errorf("invalid connection ID length: %d", connIDLen) + } + if len(b) < connIDLen+len(pa.StatelessResetToken) { + return io.EOF + } + pa.ConnectionID = protocol.ParseConnectionID(b[:connIDLen]) + b = b[connIDLen:] + copy(pa.StatelessResetToken[:], b) + b = b[len(pa.StatelessResetToken):] + if bytesRead := remainingLen - len(b); bytesRead != expectedLen { + return fmt.Errorf("expected preferred_address to be %d long, read %d bytes", expectedLen, bytesRead) + } + p.PreferredAddress = pa + return nil +} + +func (p *TransportParameters) readNumericTransportParameter(b []byte, paramID transportParameterID, expectedLen int) error { + val, l, err := quicvarint.Parse(b) + if err != nil { + return fmt.Errorf("error while reading transport parameter %d: %s", paramID, err) + } + if l != expectedLen { + return fmt.Errorf("inconsistent transport parameter length for transport parameter %#x", paramID) + } + //nolint:exhaustive // This only covers the numeric transport parameters. 
+ switch paramID { + case initialMaxStreamDataBidiLocalParameterID: + p.InitialMaxStreamDataBidiLocal = protocol.ByteCount(val) + case initialMaxStreamDataBidiRemoteParameterID: + p.InitialMaxStreamDataBidiRemote = protocol.ByteCount(val) + case initialMaxStreamDataUniParameterID: + p.InitialMaxStreamDataUni = protocol.ByteCount(val) + case initialMaxDataParameterID: + p.InitialMaxData = protocol.ByteCount(val) + case initialMaxStreamsBidiParameterID: + p.MaxBidiStreamNum = protocol.StreamNum(val) + if p.MaxBidiStreamNum > protocol.MaxStreamCount { + return fmt.Errorf("initial_max_streams_bidi too large: %d (maximum %d)", p.MaxBidiStreamNum, protocol.MaxStreamCount) + } + case initialMaxStreamsUniParameterID: + p.MaxUniStreamNum = protocol.StreamNum(val) + if p.MaxUniStreamNum > protocol.MaxStreamCount { + return fmt.Errorf("initial_max_streams_uni too large: %d (maximum %d)", p.MaxUniStreamNum, protocol.MaxStreamCount) + } + case maxIdleTimeoutParameterID: + p.MaxIdleTimeout = max(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Millisecond) + case maxUDPPayloadSizeParameterID: + if val < 1200 { + return fmt.Errorf("invalid value for max_udp_payload_size: %d (minimum 1200)", val) + } + p.MaxUDPPayloadSize = protocol.ByteCount(val) + case ackDelayExponentParameterID: + if val > protocol.MaxAckDelayExponent { + return fmt.Errorf("invalid value for ack_delay_exponent: %d (maximum %d)", val, protocol.MaxAckDelayExponent) + } + p.AckDelayExponent = uint8(val) + case maxAckDelayParameterID: + if val > uint64(protocol.MaxMaxAckDelay/time.Millisecond) { + return fmt.Errorf("invalid value for max_ack_delay: %dms (maximum %dms)", val, protocol.MaxMaxAckDelay/time.Millisecond) + } + p.MaxAckDelay = time.Duration(val) * time.Millisecond + case activeConnectionIDLimitParameterID: + if val < 2 { + return fmt.Errorf("invalid value for active_connection_id_limit: %d (minimum 2)", val) + } + p.ActiveConnectionIDLimit = val + case maxDatagramFrameSizeParameterID: + p.MaxDatagramFrameSize = protocol.ByteCount(val) + default: + return fmt.Errorf("TransportParameter BUG: transport parameter %d not found", paramID) + } + return nil +} + +// Marshal the transport parameters +func (p *TransportParameters) Marshal(pers protocol.Perspective) []byte { + // Typical Transport Parameters consume around 110 bytes, depending on the exact values, + // especially the lengths of the Connection IDs. + // Allocate 256 bytes, so we won't have to grow the slice in any case. + b := make([]byte, 0, 256) + + // add a greased value + random := make([]byte, 18) + rand.Read(random) + b = quicvarint.Append(b, 27+31*uint64(random[0])) + length := random[1] % 16 + b = quicvarint.Append(b, uint64(length)) + b = append(b, random[2:2+length]...) 
+ + // initial_max_stream_data_bidi_local + b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) + // initial_max_stream_data_bidi_remote + b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) + // initial_max_stream_data_uni + b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) + // initial_max_data + b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) + // initial_max_bidi_streams + b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) + // initial_max_uni_streams + b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) + // idle_timeout + b = p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond)) + // max_udp_payload_size + if p.MaxUDPPayloadSize > 0 { + b = p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(p.MaxUDPPayloadSize)) + } + // max_ack_delay + // Only send it if it is different from the default value. + if p.MaxAckDelay != protocol.DefaultMaxAckDelay { + b = p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond)) + } + // ack_delay_exponent + // Only send it if it is different from the default value. + if p.AckDelayExponent != protocol.DefaultAckDelayExponent { + b = p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent)) + } + // disable_active_migration + if p.DisableActiveMigration { + b = quicvarint.Append(b, uint64(disableActiveMigrationParameterID)) + b = quicvarint.Append(b, 0) + } + if pers == protocol.PerspectiveServer { + // stateless_reset_token + if p.StatelessResetToken != nil { + b = quicvarint.Append(b, uint64(statelessResetTokenParameterID)) + b = quicvarint.Append(b, 16) + b = append(b, p.StatelessResetToken[:]...) + } + // original_destination_connection_id + b = quicvarint.Append(b, uint64(originalDestinationConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.OriginalDestinationConnectionID.Len())) + b = append(b, p.OriginalDestinationConnectionID.Bytes()...) + // preferred_address + if p.PreferredAddress != nil { + b = quicvarint.Append(b, uint64(preferredAddressParameterID)) + b = quicvarint.Append(b, 4+2+16+2+1+uint64(p.PreferredAddress.ConnectionID.Len())+16) + ip4 := p.PreferredAddress.IPv4.Addr().As4() + b = append(b, ip4[:]...) + b = binary.BigEndian.AppendUint16(b, p.PreferredAddress.IPv4.Port()) + ip6 := p.PreferredAddress.IPv6.Addr().As16() + b = append(b, ip6[:]...) + b = binary.BigEndian.AppendUint16(b, p.PreferredAddress.IPv6.Port()) + b = append(b, uint8(p.PreferredAddress.ConnectionID.Len())) + b = append(b, p.PreferredAddress.ConnectionID.Bytes()...) + b = append(b, p.PreferredAddress.StatelessResetToken[:]...) + } + } + // active_connection_id_limit + if p.ActiveConnectionIDLimit != protocol.DefaultActiveConnectionIDLimit { + b = p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) + } + // initial_source_connection_id + b = quicvarint.Append(b, uint64(initialSourceConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.InitialSourceConnectionID.Len())) + b = append(b, p.InitialSourceConnectionID.Bytes()...)
+ // retry_source_connection_id + if pers == protocol.PerspectiveServer && p.RetrySourceConnectionID != nil { + b = quicvarint.Append(b, uint64(retrySourceConnectionIDParameterID)) + b = quicvarint.Append(b, uint64(p.RetrySourceConnectionID.Len())) + b = append(b, p.RetrySourceConnectionID.Bytes()...) + } + if p.MaxDatagramFrameSize != protocol.InvalidByteCount { + b = p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize)) + } + + if pers == protocol.PerspectiveClient && len(AdditionalTransportParametersClient) > 0 { + for k, v := range AdditionalTransportParametersClient { + b = quicvarint.Append(b, k) + b = quicvarint.Append(b, uint64(len(v))) + b = append(b, v...) + } + } + + return b +} + +func (p *TransportParameters) marshalVarintParam(b []byte, id transportParameterID, val uint64) []byte { + b = quicvarint.Append(b, uint64(id)) + b = quicvarint.Append(b, uint64(quicvarint.Len(val))) + return quicvarint.Append(b, val) +} + +// MarshalForSessionTicket marshals the transport parameters we save in the session ticket. +// When sending a 0-RTT enabled TLS session ticket, we need to save the transport parameters. +// The client will remember the transport parameters used in the last session, +// and apply those to the 0-RTT data it sends. +// Saving the transport parameters in the ticket gives the server the option to reject 0-RTT +// if the transport parameters changed. +// Since the session ticket is encrypted, the serialization format is defined by the server. +// For convenience, we use the same format that we also use for sending the transport parameters. +func (p *TransportParameters) MarshalForSessionTicket(b []byte) []byte { + b = quicvarint.Append(b, transportParameterMarshalingVersion) + + // initial_max_stream_data_bidi_local + b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal)) + // initial_max_stream_data_bidi_remote + b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote)) + // initial_max_stream_data_uni + b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni)) + // initial_max_data + b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData)) + // initial_max_bidi_streams + b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum)) + // initial_max_uni_streams + b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum)) + // max_datagram_frame_size + if p.MaxDatagramFrameSize != protocol.InvalidByteCount { + b = p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize)) + } + // active_connection_id_limit + return p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit) +} + +// UnmarshalFromSessionTicket unmarshals transport parameters from a session ticket. +func (p *TransportParameters) UnmarshalFromSessionTicket(b []byte) error { + version, l, err := quicvarint.Parse(b) + if err != nil { + return err + } + if version != transportParameterMarshalingVersion { + return fmt.Errorf("unknown transport parameter marshaling version: %d", version) + } + return p.unmarshal(b[l:], protocol.PerspectiveServer, true) +} +
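An editorial aside, not part of the vendored patch: both Marshal and MarshalForSessionTicket above emit each transport parameter as a varint-encoded (id, length, value) triple. The self-contained sketch below hand-rolls the RFC 9000 variable-length integer coding (the real code uses the quicvarint package) and round-trips one hypothetical parameter; appendVarint and parseVarint are invented names for illustration.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// appendVarint encodes v as a QUIC variable-length integer (RFC 9000, Section 16).
// Values >= 2^62 are not representable and are omitted here for brevity.
func appendVarint(b []byte, v uint64) []byte {
	switch {
	case v <= 63:
		return append(b, byte(v))
	case v <= 16383:
		return binary.BigEndian.AppendUint16(b, uint16(v)|0x4000)
	case v <= 1073741823:
		return binary.BigEndian.AppendUint32(b, uint32(v)|0x80000000)
	default:
		return binary.BigEndian.AppendUint64(b, v|0xc000000000000000)
	}
}

// parseVarint decodes a QUIC varint, returning the value and the number of bytes consumed.
func parseVarint(b []byte) (uint64, int, error) {
	if len(b) == 0 {
		return 0, 0, errors.New("empty input")
	}
	l := 1 << (b[0] >> 6) // the two top bits encode the length: 1, 2, 4 or 8 bytes
	if len(b) < l {
		return 0, 0, errors.New("truncated varint")
	}
	v := uint64(b[0] & 0x3f)
	for _, c := range b[1:l] {
		v = v<<8 | uint64(c)
	}
	return v, l, nil
}

func main() {
	// Encode one hypothetical parameter as (id, length, value), the layout used above.
	const maxIdleTimeoutID = 0x01
	val := appendVarint(nil, 30000) // 30s, in milliseconds
	buf := appendVarint(nil, maxIdleTimeoutID)
	buf = appendVarint(buf, uint64(len(val)))
	buf = append(buf, val...)

	// Decode it again.
	id, n, _ := parseVarint(buf)
	length, m, _ := parseVarint(buf[n:])
	v, _, _ := parseVarint(buf[n+m : n+m+int(length)])
	fmt.Printf("param %#x, len %d, value %d\n", id, length, v)
}

+// ValidFor0RTT checks if the transport parameters match those saved in the session ticket.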
+func (p *TransportParameters) ValidFor0RTT(saved *TransportParameters) bool { + if saved.MaxDatagramFrameSize != protocol.InvalidByteCount && (p.MaxDatagramFrameSize == protocol.InvalidByteCount || p.MaxDatagramFrameSize < saved.MaxDatagramFrameSize) { + return false + } + return p.InitialMaxStreamDataBidiLocal >= saved.InitialMaxStreamDataBidiLocal && + p.InitialMaxStreamDataBidiRemote >= saved.InitialMaxStreamDataBidiRemote && + p.InitialMaxStreamDataUni >= saved.InitialMaxStreamDataUni && + p.InitialMaxData >= saved.InitialMaxData && + p.MaxBidiStreamNum >= saved.MaxBidiStreamNum && + p.MaxUniStreamNum >= saved.MaxUniStreamNum && + p.ActiveConnectionIDLimit == saved.ActiveConnectionIDLimit +} + +// ValidForUpdate checks that the new transport parameters don't reduce limits after resuming a 0-RTT connection. +// It is only used on the client side. +func (p *TransportParameters) ValidForUpdate(saved *TransportParameters) bool { + if saved.MaxDatagramFrameSize != protocol.InvalidByteCount && (p.MaxDatagramFrameSize == protocol.InvalidByteCount || p.MaxDatagramFrameSize < saved.MaxDatagramFrameSize) { + return false + } + return p.ActiveConnectionIDLimit >= saved.ActiveConnectionIDLimit && + p.InitialMaxData >= saved.InitialMaxData && + p.InitialMaxStreamDataBidiLocal >= saved.InitialMaxStreamDataBidiLocal && + p.InitialMaxStreamDataBidiRemote >= saved.InitialMaxStreamDataBidiRemote && + p.InitialMaxStreamDataUni >= saved.InitialMaxStreamDataUni && + p.MaxBidiStreamNum >= saved.MaxBidiStreamNum && + p.MaxUniStreamNum >= saved.MaxUniStreamNum +} + +// String returns a string representation, intended for logging. +func (p *TransportParameters) String() string { + logString := "&wire.TransportParameters{OriginalDestinationConnectionID: %s, InitialSourceConnectionID: %s, " + logParams := []interface{}{p.OriginalDestinationConnectionID, p.InitialSourceConnectionID} + if p.RetrySourceConnectionID != nil { + logString += "RetrySourceConnectionID: %s, " + logParams = append(logParams, p.RetrySourceConnectionID) + } + logString += "InitialMaxStreamDataBidiLocal: %d, InitialMaxStreamDataBidiRemote: %d, InitialMaxStreamDataUni: %d, InitialMaxData: %d, MaxBidiStreamNum: %d, MaxUniStreamNum: %d, MaxIdleTimeout: %s, AckDelayExponent: %d, MaxAckDelay: %s, ActiveConnectionIDLimit: %d" + logParams = append(logParams, []interface{}{p.InitialMaxStreamDataBidiLocal, p.InitialMaxStreamDataBidiRemote, p.InitialMaxStreamDataUni, p.InitialMaxData, p.MaxBidiStreamNum, p.MaxUniStreamNum, p.MaxIdleTimeout, p.AckDelayExponent, p.MaxAckDelay, p.ActiveConnectionIDLimit}...) + if p.StatelessResetToken != nil { // the client never sends a stateless reset token + logString += ", StatelessResetToken: %#x" + logParams = append(logParams, *p.StatelessResetToken) + } + if p.MaxDatagramFrameSize != protocol.InvalidByteCount { + logString += ", MaxDatagramFrameSize: %d" + logParams = append(logParams, p.MaxDatagramFrameSize) + } + logString += "}" + return fmt.Sprintf(logString, logParams...) +}
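Another aside for context (not vendored code): ValidFor0RTT and ValidForUpdate above enforce the 0-RTT resumption rule that remembered limits may grow but must never shrink. A reduced sketch of that check, with an invented limits type standing in for the full TransportParameters:

package main

import "fmt"

// limits is a hypothetical, reduced view of the flow-control parameters.
type limits struct {
	maxData        uint64
	maxBidiStreams uint64
}

// validForResumption reports whether the freshly received limits are safe to
// resume 0-RTT against the remembered ones: nothing may shrink.
func validForResumption(saved, current limits) bool {
	return current.maxData >= saved.maxData &&
		current.maxBidiStreams >= saved.maxBidiStreams
}

func main() {
	saved := limits{maxData: 1 << 20, maxBidiStreams: 100}
	fmt.Println(validForResumption(saved, limits{maxData: 2 << 20, maxBidiStreams: 100})) // true
	fmt.Println(validForResumption(saved, limits{maxData: 1 << 19, maxBidiStreams: 100})) // false: a limit was reduced
}

diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go new file mode 100644 index 00000000..a551aa8c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go @@ -0,0 +1,53 @@ +package wire + +import ( + "crypto/rand" + "encoding/binary" + "errors" + + "github.com/quic-go/quic-go/internal/protocol" +) + +// ParseVersionNegotiationPacket parses a Version Negotiation packet.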
+func ParseVersionNegotiationPacket(b []byte) (dest, src protocol.ArbitraryLenConnectionID, _ []protocol.Version, _ error) { + n, dest, src, err := ParseArbitraryLenConnectionIDs(b) + if err != nil { + return nil, nil, nil, err + } + b = b[n:] + if len(b) == 0 { + //nolint:stylecheck + return nil, nil, nil, errors.New("Version Negotiation packet has empty version list") + } + if len(b)%4 != 0 { + //nolint:stylecheck + return nil, nil, nil, errors.New("Version Negotiation packet has a version list with an invalid length") + } + versions := make([]protocol.Version, len(b)/4) + for i := 0; len(b) > 0; i++ { + versions[i] = protocol.Version(binary.BigEndian.Uint32(b[:4])) + b = b[4:] + } + return dest, src, versions, nil +} + +// ComposeVersionNegotiation composes a Version Negotiation packet +func ComposeVersionNegotiation(destConnID, srcConnID protocol.ArbitraryLenConnectionID, versions []protocol.Version) []byte { + greasedVersions := protocol.GetGreasedVersions(versions) + expectedLen := 1 /* type byte */ + 4 /* version field */ + 1 /* dest connection ID length field */ + destConnID.Len() + 1 /* src connection ID length field */ + srcConnID.Len() + len(greasedVersions)*4 + buf := make([]byte, 1+4 /* type byte and version field */, expectedLen) + _, _ = rand.Read(buf[:1]) // ignore the error here. It is not critical to have perfect random here. + // Setting the "QUIC bit" (0x40) is not required by the RFC, + // but it allows clients to demultiplex QUIC with a long list of other protocols. + // See RFC 9443 and https://mailarchive.ietf.org/arch/msg/quic/oR4kxGKY6mjtPC1CZegY1ED4beg/ for details. + buf[0] |= 0xc0 + // The next 4 bytes are left at 0 (version number). + buf = append(buf, uint8(destConnID.Len())) + buf = append(buf, destConnID.Bytes()...) + buf = append(buf, uint8(srcConnID.Len())) + buf = append(buf, srcConnID.Bytes()...) + for _, v := range greasedVersions { + buf = binary.BigEndian.AppendUint32(buf, uint32(v)) + } + return buf +}
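For context (again, not vendored code): the Version Negotiation layout produced by ComposeVersionNegotiation above can be reproduced with the standard library alone. composeVN below is an invented stand-in that keeps only the wire format: a random first byte with the QUIC bit set, a zero version field, length-prefixed connection IDs, and a list of 32-bit versions (greasing omitted).

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

// composeVN builds the Version Negotiation layout shown above.
func composeVN(dest, src []byte, versions []uint32) []byte {
	buf := make([]byte, 1, 7+len(dest)+len(src)+4*len(versions))
	_, _ = rand.Read(buf[:1])     // randomize the unused bits of the first byte
	buf[0] |= 0xc0                // long-header form bit and "QUIC bit"
	buf = append(buf, 0, 0, 0, 0) // a zero version field marks Version Negotiation
	buf = append(buf, byte(len(dest)))
	buf = append(buf, dest...)
	buf = append(buf, byte(len(src)))
	buf = append(buf, src...)
	for _, v := range versions {
		buf = binary.BigEndian.AppendUint32(buf, v)
	}
	return buf
}

func main() {
	// 0x00000001 is QUIC v1 (RFC 9000), 0x6b3343cf is QUIC v2 (RFC 9369).
	pkt := composeVN([]byte{1, 2, 3, 4}, []byte{9, 8}, []uint32{0x00000001, 0x6b3343cf})
	fmt.Printf("% x\n", pkt)
}

diff --git a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go new file mode 100644 index 00000000..96bf4617 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go @@ -0,0 +1,271 @@ +package logging + +import ( + "net" + "time" +) + +// A ConnectionTracer records events.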
+type ConnectionTracer struct { + StartedConnection func(local, remote net.Addr, srcConnID, destConnID ConnectionID) + NegotiatedVersion func(chosen Version, clientVersions, serverVersions []Version) + ClosedConnection func(error) + SentTransportParameters func(*TransportParameters) + ReceivedTransportParameters func(*TransportParameters) + RestoredTransportParameters func(parameters *TransportParameters) // for 0-RTT + SentLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, *AckFrame, []Frame) + SentShortHeaderPacket func(*ShortHeader, ByteCount, ECN, *AckFrame, []Frame) + ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, _ []Version) + ReceivedRetry func(*Header) + ReceivedLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, []Frame) + ReceivedShortHeaderPacket func(*ShortHeader, ByteCount, ECN, []Frame) + BufferedPacket func(PacketType, ByteCount) + DroppedPacket func(PacketType, PacketNumber, ByteCount, PacketDropReason) + UpdatedMetrics func(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) + AcknowledgedPacket func(EncryptionLevel, PacketNumber) + LostPacket func(EncryptionLevel, PacketNumber, PacketLossReason) + UpdatedMTU func(mtu ByteCount, done bool) + UpdatedCongestionState func(CongestionState) + UpdatedPTOCount func(value uint32) + UpdatedKeyFromTLS func(EncryptionLevel, Perspective) + UpdatedKey func(keyPhase KeyPhase, remote bool) + DroppedEncryptionLevel func(EncryptionLevel) + DroppedKey func(keyPhase KeyPhase) + SetLossTimer func(TimerType, EncryptionLevel, time.Time) + LossTimerExpired func(TimerType, EncryptionLevel) + LossTimerCanceled func() + ECNStateUpdated func(state ECNState, trigger ECNStateTrigger) + ChoseALPN func(protocol string) + // Close is called when the connection is closed. + Close func() + Debug func(name, msg string) +} +
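One more illustrative aside, not from the patch: ConnectionTracer above is a struct of optional callbacks, so a tracer implements only the events it cares about, and NewMultiplexedConnectionTracer (next) fans each event out to every tracer that set the corresponding hook. A minimal sketch of the same pattern, with invented names and just two hooks:

package main

import "fmt"

// tracer mirrors the shape of ConnectionTracer: every hook is an optional func field.
type tracer struct {
	ClosedConnection func(err error)
	Debug            func(name, msg string)
}

// multiplex fans each event out to every tracer that registered the hook,
// the same nil-check-and-forward pattern used for all the hooks below.
func multiplex(tracers ...*tracer) *tracer {
	return &tracer{
		ClosedConnection: func(err error) {
			for _, t := range tracers {
				if t.ClosedConnection != nil {
					t.ClosedConnection(err)
				}
			}
		},
		Debug: func(name, msg string) {
			for _, t := range tracers {
				if t.Debug != nil {
					t.Debug(name, msg)
				}
			}
		},
	}
}

func main() {
	a := &tracer{Debug: func(name, msg string) { fmt.Println("a:", name, msg) }}
	b := &tracer{ClosedConnection: func(err error) { fmt.Println("b closed:", err) }}
	m := multiplex(a, b)
	m.Debug("hook", "fires only where set")
	m.ClosedConnection(fmt.Errorf("connection done"))
}

+// NewMultiplexedConnectionTracer creates a new connection tracer that multiplexes events to multiple tracers.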
+func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTracer { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &ConnectionTracer{ + StartedConnection: func(local, remote net.Addr, srcConnID, destConnID ConnectionID) { + for _, t := range tracers { + if t.StartedConnection != nil { + t.StartedConnection(local, remote, srcConnID, destConnID) + } + } + }, + NegotiatedVersion: func(chosen Version, clientVersions, serverVersions []Version) { + for _, t := range tracers { + if t.NegotiatedVersion != nil { + t.NegotiatedVersion(chosen, clientVersions, serverVersions) + } + } + }, + ClosedConnection: func(e error) { + for _, t := range tracers { + if t.ClosedConnection != nil { + t.ClosedConnection(e) + } + } + }, + SentTransportParameters: func(tp *TransportParameters) { + for _, t := range tracers { + if t.SentTransportParameters != nil { + t.SentTransportParameters(tp) + } + } + }, + ReceivedTransportParameters: func(tp *TransportParameters) { + for _, t := range tracers { + if t.ReceivedTransportParameters != nil { + t.ReceivedTransportParameters(tp) + } + } + }, + RestoredTransportParameters: func(tp *TransportParameters) { + for _, t := range tracers { + if t.RestoredTransportParameters != nil { + t.RestoredTransportParameters(tp) + } + } + }, + SentLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { + for _, t := range tracers { + if t.SentLongHeaderPacket != nil { + t.SentLongHeaderPacket(hdr, size, ecn, ack, frames) + } + } + }, + SentShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { + for _, t := range tracers { + if t.SentShortHeaderPacket != nil { + t.SentShortHeaderPacket(hdr, size, ecn, ack, frames) + } + } + }, + ReceivedVersionNegotiationPacket: func(dest, src ArbitraryLenConnectionID, versions []Version) { + for _, t := range tracers { + if t.ReceivedVersionNegotiationPacket != nil { + t.ReceivedVersionNegotiationPacket(dest, src, versions) + } + } + }, + ReceivedRetry: func(hdr *Header) { + for _, t := range tracers { + if t.ReceivedRetry != nil { + t.ReceivedRetry(hdr) + } + } + }, + ReceivedLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) { + for _, t := range tracers { + if t.ReceivedLongHeaderPacket != nil { + t.ReceivedLongHeaderPacket(hdr, size, ecn, frames) + } + } + }, + ReceivedShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) { + for _, t := range tracers { + if t.ReceivedShortHeaderPacket != nil { + t.ReceivedShortHeaderPacket(hdr, size, ecn, frames) + } + } + }, + BufferedPacket: func(typ PacketType, size ByteCount) { + for _, t := range tracers { + if t.BufferedPacket != nil { + t.BufferedPacket(typ, size) + } + } + }, + DroppedPacket: func(typ PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) { + for _, t := range tracers { + if t.DroppedPacket != nil { + t.DroppedPacket(typ, pn, size, reason) + } + } + }, + UpdatedMetrics: func(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) { + for _, t := range tracers { + if t.UpdatedMetrics != nil { + t.UpdatedMetrics(rttStats, cwnd, bytesInFlight, packetsInFlight) + } + } + }, + AcknowledgedPacket: func(encLevel EncryptionLevel, pn PacketNumber) { + for _, t := range tracers { + if t.AcknowledgedPacket != nil { + t.AcknowledgedPacket(encLevel, pn) + } + } + }, + LostPacket: func(encLevel EncryptionLevel, pn PacketNumber, reason 
PacketLossReason) { + for _, t := range tracers { + if t.LostPacket != nil { + t.LostPacket(encLevel, pn, reason) + } + } + }, + UpdatedMTU: func(mtu ByteCount, done bool) { + for _, t := range tracers { + if t.UpdatedMTU != nil { + t.UpdatedMTU(mtu, done) + } + } + }, + UpdatedCongestionState: func(state CongestionState) { + for _, t := range tracers { + if t.UpdatedCongestionState != nil { + t.UpdatedCongestionState(state) + } + } + }, + UpdatedPTOCount: func(value uint32) { + for _, t := range tracers { + if t.UpdatedPTOCount != nil { + t.UpdatedPTOCount(value) + } + } + }, + UpdatedKeyFromTLS: func(encLevel EncryptionLevel, perspective Perspective) { + for _, t := range tracers { + if t.UpdatedKeyFromTLS != nil { + t.UpdatedKeyFromTLS(encLevel, perspective) + } + } + }, + UpdatedKey: func(generation KeyPhase, remote bool) { + for _, t := range tracers { + if t.UpdatedKey != nil { + t.UpdatedKey(generation, remote) + } + } + }, + DroppedEncryptionLevel: func(encLevel EncryptionLevel) { + for _, t := range tracers { + if t.DroppedEncryptionLevel != nil { + t.DroppedEncryptionLevel(encLevel) + } + } + }, + DroppedKey: func(generation KeyPhase) { + for _, t := range tracers { + if t.DroppedKey != nil { + t.DroppedKey(generation) + } + } + }, + SetLossTimer: func(typ TimerType, encLevel EncryptionLevel, exp time.Time) { + for _, t := range tracers { + if t.SetLossTimer != nil { + t.SetLossTimer(typ, encLevel, exp) + } + } + }, + LossTimerExpired: func(typ TimerType, encLevel EncryptionLevel) { + for _, t := range tracers { + if t.LossTimerExpired != nil { + t.LossTimerExpired(typ, encLevel) + } + } + }, + LossTimerCanceled: func() { + for _, t := range tracers { + if t.LossTimerCanceled != nil { + t.LossTimerCanceled() + } + } + }, + ECNStateUpdated: func(state ECNState, trigger ECNStateTrigger) { + for _, t := range tracers { + if t.ECNStateUpdated != nil { + t.ECNStateUpdated(state, trigger) + } + } + }, + ChoseALPN: func(protocol string) { + for _, t := range tracers { + if t.ChoseALPN != nil { + t.ChoseALPN(protocol) + } + } + }, + Close: func() { + for _, t := range tracers { + if t.Close != nil { + t.Close() + } + } + }, + Debug: func(name, msg string) { + for _, t := range tracers { + if t.Debug != nil { + t.Debug(name, msg) + } + } + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/frame.go b/vendor/github.com/quic-go/quic-go/logging/frame.go new file mode 100644 index 00000000..9a055db3 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/frame.go @@ -0,0 +1,66 @@ +package logging + +import "github.com/quic-go/quic-go/internal/wire" + +// A Frame is a QUIC frame +type Frame interface{} + +// The AckRange is used within the AckFrame. +// It is a range of packet numbers that is being acknowledged. +type AckRange = wire.AckRange + +type ( + // An AckFrame is an ACK frame. + AckFrame = wire.AckFrame + // A ConnectionCloseFrame is a CONNECTION_CLOSE frame. + ConnectionCloseFrame = wire.ConnectionCloseFrame + // A DataBlockedFrame is a DATA_BLOCKED frame. + DataBlockedFrame = wire.DataBlockedFrame + // A HandshakeDoneFrame is a HANDSHAKE_DONE frame. + HandshakeDoneFrame = wire.HandshakeDoneFrame + // A MaxDataFrame is a MAX_DATA frame. + MaxDataFrame = wire.MaxDataFrame + // A MaxStreamDataFrame is a MAX_STREAM_DATA frame. + MaxStreamDataFrame = wire.MaxStreamDataFrame + // A MaxStreamsFrame is a MAX_STREAMS frame. + MaxStreamsFrame = wire.MaxStreamsFrame + // A NewConnectionIDFrame is a NEW_CONNECTION_ID frame.
+ NewConnectionIDFrame = wire.NewConnectionIDFrame + // A NewTokenFrame is a NEW_TOKEN frame. + NewTokenFrame = wire.NewTokenFrame + // A PathChallengeFrame is a PATH_CHALLENGE frame. + PathChallengeFrame = wire.PathChallengeFrame + // A PathResponseFrame is a PATH_RESPONSE frame. + PathResponseFrame = wire.PathResponseFrame + // A PingFrame is a PING frame. + PingFrame = wire.PingFrame + // A ResetStreamFrame is a RESET_STREAM frame. + ResetStreamFrame = wire.ResetStreamFrame + // A RetireConnectionIDFrame is a RETIRE_CONNECTION_ID frame. + RetireConnectionIDFrame = wire.RetireConnectionIDFrame + // A StopSendingFrame is a STOP_SENDING frame. + StopSendingFrame = wire.StopSendingFrame + // A StreamsBlockedFrame is a STREAMS_BLOCKED frame. + StreamsBlockedFrame = wire.StreamsBlockedFrame + // A StreamDataBlockedFrame is a STREAM_DATA_BLOCKED frame. + StreamDataBlockedFrame = wire.StreamDataBlockedFrame +) + +// A CryptoFrame is a CRYPTO frame. +type CryptoFrame struct { + Offset ByteCount + Length ByteCount +} + +// A StreamFrame is a STREAM frame. +type StreamFrame struct { + StreamID StreamID + Offset ByteCount + Length ByteCount + Fin bool +} + +// A DatagramFrame is a DATAGRAM frame. +type DatagramFrame struct { + Length ByteCount +} diff --git a/vendor/github.com/quic-go/quic-go/logging/interface.go b/vendor/github.com/quic-go/quic-go/logging/interface.go new file mode 100644 index 00000000..1f8edb92 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/interface.go @@ -0,0 +1,111 @@ +// Package logging defines a logging interface for quic-go. +// This package should not be considered stable +package logging + +import ( + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" +) + +type ( + // A ByteCount is used to count bytes. + ByteCount = protocol.ByteCount + // ECN is the ECN value + ECN = protocol.ECN + // A ConnectionID is a QUIC Connection ID. + ConnectionID = protocol.ConnectionID + // An ArbitraryLenConnectionID is a QUIC Connection ID that can be up to 255 bytes long. + ArbitraryLenConnectionID = protocol.ArbitraryLenConnectionID + // The EncryptionLevel is the encryption level of a packet. + EncryptionLevel = protocol.EncryptionLevel + // The KeyPhase is the key phase of the 1-RTT keys. + KeyPhase = protocol.KeyPhase + // The KeyPhaseBit is the value of the key phase bit of the 1-RTT packets. + KeyPhaseBit = protocol.KeyPhaseBit + // The PacketNumber is the packet number of a packet. + PacketNumber = protocol.PacketNumber + // The Perspective is the role of a QUIC endpoint (client or server). + Perspective = protocol.Perspective + // A StatelessResetToken is a stateless reset token. + StatelessResetToken = protocol.StatelessResetToken + // The StreamID is the stream ID. + StreamID = protocol.StreamID + // The StreamNum is the number of the stream. + StreamNum = protocol.StreamNum + // The StreamType is the type of the stream (unidirectional or bidirectional). + StreamType = protocol.StreamType + // The Version is the QUIC version. + Version = protocol.Version + + // The Header is the QUIC packet header, before removing header protection. + Header = wire.Header + // The ExtendedHeader is the QUIC Long Header packet header, after removing header protection. + ExtendedHeader = wire.ExtendedHeader + // The TransportParameters are QUIC transport parameters. 
+ TransportParameters = wire.TransportParameters + // The PreferredAddress is the preferred address sent in the transport parameters. + PreferredAddress = wire.PreferredAddress + + // A TransportError is a transport-level error code. + TransportError = qerr.TransportErrorCode + // An ApplicationError is an application-defined error code. + ApplicationError = qerr.TransportErrorCode + + // The RTTStats contain statistics used by the congestion controller. + RTTStats = utils.RTTStats +) + +const ( + // ECNUnsupported means that no ECN value was set / received + ECNUnsupported = protocol.ECNUnsupported + // ECTNot is Not-ECT + ECTNot = protocol.ECNNon + // ECT0 is ECT(0) + ECT0 = protocol.ECT0 + // ECT1 is ECT(1) + ECT1 = protocol.ECT1 + // ECNCE is CE + ECNCE = protocol.ECNCE +) + +const ( + // KeyPhaseZero is key phase bit 0 + KeyPhaseZero = protocol.KeyPhaseZero + // KeyPhaseOne is key phase bit 1 + KeyPhaseOne = protocol.KeyPhaseOne +) + +const ( + // PerspectiveServer is used for a QUIC server + PerspectiveServer = protocol.PerspectiveServer + // PerspectiveClient is used for a QUIC client + PerspectiveClient = protocol.PerspectiveClient +) + +const ( + // EncryptionInitial is the Initial encryption level + EncryptionInitial = protocol.EncryptionInitial + // EncryptionHandshake is the Handshake encryption level + EncryptionHandshake = protocol.EncryptionHandshake + // Encryption1RTT is the 1-RTT encryption level + Encryption1RTT = protocol.Encryption1RTT + // Encryption0RTT is the 0-RTT encryption level + Encryption0RTT = protocol.Encryption0RTT +) + +const ( + // StreamTypeUni is a unidirectional stream + StreamTypeUni = protocol.StreamTypeUni + // StreamTypeBidi is a bidirectional stream + StreamTypeBidi = protocol.StreamTypeBidi +) + +// The ShortHeader is the QUIC Short Header packet header, after removing header protection. +type ShortHeader struct { + DestConnectionID ConnectionID + PacketNumber PacketNumber + PacketNumberLen protocol.PacketNumberLen + KeyPhase KeyPhaseBit +} diff --git a/vendor/github.com/quic-go/quic-go/logging/packet_header.go b/vendor/github.com/quic-go/quic-go/logging/packet_header.go new file mode 100644 index 00000000..6b8df58d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/packet_header.go @@ -0,0 +1,24 @@ +package logging + +import ( + "github.com/quic-go/quic-go/internal/protocol" +) + +// PacketTypeFromHeader determines the packet type from a *wire.Header. +func PacketTypeFromHeader(hdr *Header) PacketType { + if hdr.Version == 0 { + return PacketTypeVersionNegotiation + } + switch hdr.Type { + case protocol.PacketTypeInitial: + return PacketTypeInitial + case protocol.PacketTypeHandshake: + return PacketTypeHandshake + case protocol.PacketType0RTT: + return PacketType0RTT + case protocol.PacketTypeRetry: + return PacketTypeRetry + default: + return PacketTypeNotDetermined + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/tracer.go b/vendor/github.com/quic-go/quic-go/logging/tracer.go new file mode 100644 index 00000000..625a809e --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/tracer.go @@ -0,0 +1,59 @@ +package logging + +import "net" + +// A Tracer traces events. 
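Almost every name in the logging package's interface.go above is a type alias (`=`) rather than a new type definition, so values produced in the internal packages flow through the public names without conversion. A standalone sketch of why the alias form matters; the types here are illustrative, not quic-go's:

```go
package main

import "fmt"

type byteCount int64 // stands in for an internal, unexported type

// BC is an alias: BC and byteCount are the *same* type, so internal
// values are usable through the exported name directly.
type BC = byteCount

// DC is a defined type: assigning a byteCount to it needs a conversion.
type DC byteCount

func main() {
	var internal byteCount = 1200
	var a BC = internal     // fine: identical types
	var d DC = DC(internal) // explicit conversion required
	fmt.Println(a, d)
}
```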
+type Tracer struct { + SentPacket func(net.Addr, *Header, ByteCount, []Frame) + SentVersionNegotiationPacket func(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []Version) + DroppedPacket func(net.Addr, PacketType, ByteCount, PacketDropReason) + Debug func(name, msg string) + Close func() +} + +// NewMultiplexedTracer creates a new tracer that multiplexes events to multiple tracers. +func NewMultiplexedTracer(tracers ...*Tracer) *Tracer { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &Tracer{ + SentPacket: func(remote net.Addr, hdr *Header, size ByteCount, frames []Frame) { + for _, t := range tracers { + if t.SentPacket != nil { + t.SentPacket(remote, hdr, size, frames) + } + } + }, + SentVersionNegotiationPacket: func(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []Version) { + for _, t := range tracers { + if t.SentVersionNegotiationPacket != nil { + t.SentVersionNegotiationPacket(remote, dest, src, versions) + } + } + }, + DroppedPacket: func(remote net.Addr, typ PacketType, size ByteCount, reason PacketDropReason) { + for _, t := range tracers { + if t.DroppedPacket != nil { + t.DroppedPacket(remote, typ, size, reason) + } + } + }, + Debug: func(name, msg string) { + for _, t := range tracers { + if t.Debug != nil { + t.Debug(name, msg) + } + } + }, + Close: func() { + for _, t := range tracers { + if t.Close != nil { + t.Close() + } + } + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/types.go b/vendor/github.com/quic-go/quic-go/logging/types.go new file mode 100644 index 00000000..0d79b0a9 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/types.go @@ -0,0 +1,128 @@ +package logging + +// PacketType is the packet type of a QUIC packet +type PacketType uint8 + +const ( + // PacketTypeInitial is the packet type of an Initial packet + PacketTypeInitial PacketType = iota + // PacketTypeHandshake is the packet type of a Handshake packet + PacketTypeHandshake + // PacketTypeRetry is the packet type of a Retry packet + PacketTypeRetry + // PacketType0RTT is the packet type of a 0-RTT packet + PacketType0RTT + // PacketTypeVersionNegotiation is the packet type of a Version Negotiation packet + PacketTypeVersionNegotiation + // PacketType1RTT is a 1-RTT packet + PacketType1RTT + // PacketTypeStatelessReset is a stateless reset + PacketTypeStatelessReset + // PacketTypeNotDetermined is the packet type when it could not be determined + PacketTypeNotDetermined +) + +type PacketLossReason uint8 + +const ( + // PacketLossReorderingThreshold: when a packet is deemed lost due to reordering threshold + PacketLossReorderingThreshold PacketLossReason = iota + // PacketLossTimeThreshold: when a packet is deemed lost due to time threshold + PacketLossTimeThreshold +) + +type PacketDropReason uint8 + +const ( + // PacketDropKeyUnavailable is used when a packet is dropped because keys are unavailable + PacketDropKeyUnavailable PacketDropReason = iota + // PacketDropUnknownConnectionID is used when a packet is dropped because the connection ID is unknown + PacketDropUnknownConnectionID + // PacketDropHeaderParseError is used when a packet is dropped because header parsing failed + PacketDropHeaderParseError + // PacketDropPayloadDecryptError is used when a packet is dropped because decrypting the payload failed + PacketDropPayloadDecryptError + // PacketDropProtocolViolation is used when a packet is dropped due to a protocol violation + PacketDropProtocolViolation + // PacketDropDOSPrevention is used when 
a packet is dropped to mitigate a DoS attack + PacketDropDOSPrevention + // PacketDropUnsupportedVersion is used when a packet is dropped because the version is not supported + PacketDropUnsupportedVersion + // PacketDropUnexpectedPacket is used when an unexpected packet is received + PacketDropUnexpectedPacket + // PacketDropUnexpectedSourceConnectionID is used when a packet with an unexpected source connection ID is received + PacketDropUnexpectedSourceConnectionID + // PacketDropUnexpectedVersion is used when a packet with an unexpected version is received + PacketDropUnexpectedVersion + // PacketDropDuplicate is used when a duplicate packet is received + PacketDropDuplicate +) + +// TimerType is the type of the loss detection timer +type TimerType uint8 + +const ( + // TimerTypeACK is the timer type for the early retransmit timer + TimerTypeACK TimerType = iota + // TimerTypePTO is the timer type for the PTO retransmit timer + TimerTypePTO +) + +// TimeoutReason is the reason why a connection is closed +type TimeoutReason uint8 + +const ( + // TimeoutReasonHandshake is used when the connection is closed due to a handshake timeout + // This reason is not defined in the qlog draft, but very useful for debugging. + TimeoutReasonHandshake TimeoutReason = iota + // TimeoutReasonIdle is used when the connection is closed due to an idle timeout + // This reason is not defined in the qlog draft, but very useful for debugging. + TimeoutReasonIdle +) + +type CongestionState uint8 + +const ( + // CongestionStateSlowStart is the slow start phase of Reno / Cubic + CongestionStateSlowStart CongestionState = iota + // CongestionStateCongestionAvoidance is the congestion avoidance phase of Reno / Cubic + CongestionStateCongestionAvoidance + // CongestionStateRecovery is the recovery phase of Reno / Cubic + CongestionStateRecovery + // CongestionStateApplicationLimited means that the congestion controller is application limited + CongestionStateApplicationLimited +) + +// ECNState is the state of the ECN state machine (see Appendix A.4 of RFC 9000) +type ECNState uint8 + +const ( + // ECNStateTesting is the testing state + ECNStateTesting ECNState = 1 + iota + // ECNStateUnknown is the unknown state + ECNStateUnknown + // ECNStateFailed is the failed state + ECNStateFailed + // ECNStateCapable is the capable state + ECNStateCapable +) + +// ECNStateTrigger is a trigger for an ECN state transition.
+type ECNStateTrigger uint8 + +const ( + ECNTriggerNoTrigger ECNStateTrigger = iota + // ECNFailedNoECNCounts is emitted when an ACK acknowledges ECN-marked packets, + // but doesn't contain any ECN counts + ECNFailedNoECNCounts + // ECNFailedDecreasedECNCounts is emitted when an ACK frame decreases ECN counts + ECNFailedDecreasedECNCounts + // ECNFailedLostAllTestingPackets is emitted when all ECN testing packets are declared lost + ECNFailedLostAllTestingPackets + // ECNFailedMoreECNCountsThanSent is emitted when an ACK contains more ECN counts than ECN-marked packets were sent + ECNFailedMoreECNCountsThanSent + // ECNFailedTooFewECNCounts is emitted when an ACK contains fewer ECN counts than it acknowledges packets + ECNFailedTooFewECNCounts + // ECNFailedManglingDetected is emitted when the path marks all ECN-marked packets as CE + ECNFailedManglingDetected +) diff --git a/vendor/github.com/quic-go/quic-go/mockgen.go b/vendor/github.com/quic-go/quic-go/mockgen.go new file mode 100644 index 00000000..65ec465a --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/mockgen.go @@ -0,0 +1,69 @@ +//go:build gomock || generate + +package quic + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_send_conn_test.go github.com/quic-go/quic-go SendConn" +type SendConn = sendConn + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_raw_conn_test.go github.com/quic-go/quic-go RawConn" +type RawConn = rawConn + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_sender_test.go github.com/quic-go/quic-go Sender" +type Sender = sender + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_internal_test.go github.com/quic-go/quic-go StreamI" +type StreamI = streamI + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_receive_stream_internal_test.go github.com/quic-go/quic-go ReceiveStreamI" +type ReceiveStreamI = receiveStreamI + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_send_stream_internal_test.go github.com/quic-go/quic-go SendStreamI" +type SendStreamI = sendStreamI + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_sender_test.go github.com/quic-go/quic-go StreamSender" +type StreamSender = streamSender + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_control_frame_getter_test.go github.com/quic-go/quic-go StreamControlFrameGetter" +type StreamControlFrameGetter = streamControlFrameGetter + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_frame_source_test.go github.com/quic-go/quic-go FrameSource" +type FrameSource = frameSource + +//go:generate sh -c "go run 
go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_ack_frame_source_test.go github.com/quic-go/quic-go AckFrameSource" +type AckFrameSource = ackFrameSource + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_manager_test.go github.com/quic-go/quic-go StreamManager" +type StreamManager = streamManager + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_sealing_manager_test.go github.com/quic-go/quic-go SealingManager" +type SealingManager = sealingManager + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_unpacker_test.go github.com/quic-go/quic-go Unpacker" +type Unpacker = unpacker + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_packer_test.go github.com/quic-go/quic-go Packer" +type Packer = packer + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_mtu_discoverer_test.go github.com/quic-go/quic-go MTUDiscoverer" +type MTUDiscoverer = mtuDiscoverer + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_conn_runner_test.go github.com/quic-go/quic-go ConnRunner" +type ConnRunner = connRunner + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_quic_conn_test.go github.com/quic-go/quic-go QUICConn" +type QUICConn = quicConn + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_packet_handler_test.go github.com/quic-go/quic-go PacketHandler" +type PacketHandler = packetHandler + +//go:generate sh -c "go run go.uber.org/mock/mockgen -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_packet_handler_manager_test.go github.com/quic-go/quic-go PacketHandlerManager" + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_packet_handler_manager_test.go github.com/quic-go/quic-go PacketHandlerManager" +type PacketHandlerManager = packetHandlerManager + +// Need to use source mode for the batchConn, since reflect mode follows type aliases. +// See https://github.com/golang/mock/issues/244 for details. 
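These directives lean on a small trick: each unexported interface gets an exported type alias in this build-tag-guarded file, giving mockgen a public name to generate against without widening the package's normal API. A sketch of the pattern under stated assumptions; the package, interface, and module path below are illustrative:

```go
//go:build gomock

// Package demo sketches the export-for-mocking pattern used by mockgen.go.
package demo

// sender is the real, unexported interface used by the rest of the package.
type sender interface {
	Send(p []byte) error
}

// Sender exists only in gomock/generate builds, solely as a mockgen target:
//
//	go run go.uber.org/mock/mockgen -typed -build_flags="-tags=gomock" \
//	    -package demo -destination mock_sender_test.go <module path> Sender
type Sender = sender
```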
+// +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -source sys_conn_oob.go -destination mock_batch_conn_test.go -mock_names batchConn=MockBatchConn" + +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -destination mock_packetconn_test.go net PacketConn" diff --git a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go new file mode 100644 index 00000000..3f3a640a --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go @@ -0,0 +1,229 @@ +package quic + +import ( + "time" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +type mtuDiscoverer interface { + // Start starts the MTU discovery process. + // It's unnecessary to call ShouldSendProbe before that. + Start() + ShouldSendProbe(now time.Time) bool + CurrentSize() protocol.ByteCount + GetPing() (ping ackhandler.Frame, datagramSize protocol.ByteCount) +} + +const ( + // At some point, we have to stop searching for a higher MTU. + // We're happy to send a packet that's 10 bytes smaller than the actual MTU. + maxMTUDiff = 20 + // send a probe packet every mtuProbeDelay RTTs + mtuProbeDelay = 5 + // Once maxLostMTUProbes MTU probe packets larger than a certain size are lost, + // MTU discovery won't probe for larger MTUs than this size. + // The algorithm used here is resilient to packet loss of (maxLostMTUProbes - 1) packets. + maxLostMTUProbes = 3 +) + +// The Path MTU is found by sending a larger packet every now and then. +// If the packet is acknowledged, we conclude that the path supports this larger packet size. +// If the packet is lost, this can mean one of two things: +// 1. The path doesn't support this larger packet size, or +// 2. The packet was lost due to packet loss, independent of its size. +// The algorithm used here is resilient to packet loss of (maxLostMTUProbes - 1) packets. +// For simplicity, the following example uses maxLostMTUProbes = 2. +// +// Initialization: +// |------------------------------------------------------------------------------| +// min max +// +// The first MTU probe packet will have size (min+max)/2. +// Assume that this packet is acknowledged. We can now move the min marker, +// and continue the search in the resulting interval. +// +// If 1st probe packet acknowledged: +// |---------------------------------------|--------------------------------------| +// min max +// +// If 1st probe packet lost: +// |---------------------------------------|--------------------------------------| +// min lost[0] max +// +// We can't conclude that the path doesn't support this packet size, since the loss of the probe +// packet could have been unrelated to the packet size. A larger probe packet will be sent later on. +// After a loss, the next probe packet has size (min+lost[0])/2. +// Now assume this probe packet is acknowledged: +// +// 2nd probe packet acknowledged: +// |------------------|--------------------|--------------------------------------| +// min lost[0] max +// +// First of all, we conclude that the path supports at least this MTU. That's progress!
+// Second, we probe a bit more aggressively with the next probe packet: +// After an acknowledgement, the next probe packet has size (min+max)/2. +// This means we'll send a packet larger than the first probe packet (which was lost). +// +// If 3rd probe packet acknowledged: +// |-------------------------------------------------|----------------------------| +// min max +// +// We can conclude that the loss of the 1st probe packet was not due to its size, and +// continue searching in a much smaller interval now. +// +// If 3rd probe packet lost: +// |------------------|--------------------|---------|----------------------------| +// min lost[0] max +// +// Since in our example maxLostMTUProbes = 2, and we lost 2 packets smaller than max, we +// conclude that this packet size is not supported on the path, and reduce the maximum +// value of the search interval. +// +// MTU discovery concludes once the interval between min and max has been narrowed down to maxMTUDiff. + +type mtuFinder struct { + lastProbeTime time.Time + mtuIncreased func(protocol.ByteCount) + + rttStats *utils.RTTStats + + inFlight protocol.ByteCount // the size of the probe packet currently in flight. InvalidByteCount if none is in flight + min protocol.ByteCount + limit protocol.ByteCount + + // on initialization, we treat the maximum size as the first "lost" packet + lost [maxLostMTUProbes]protocol.ByteCount + lastProbeWasLost bool + + tracer *logging.ConnectionTracer +} + +var _ mtuDiscoverer = &mtuFinder{} + +func newMTUDiscoverer( + rttStats *utils.RTTStats, + start, max protocol.ByteCount, + mtuIncreased func(protocol.ByteCount), + tracer *logging.ConnectionTracer, +) *mtuFinder { + f := &mtuFinder{ + inFlight: protocol.InvalidByteCount, + min: start, + limit: max, + rttStats: rttStats, + mtuIncreased: mtuIncreased, + tracer: tracer, + } + for i := range f.lost { + if i == 0 { + f.lost[i] = max + continue + } + f.lost[i] = protocol.InvalidByteCount + } + return f +} + +func (f *mtuFinder) done() bool { + return f.max()-f.min <= maxMTUDiff+1 +} + +func (f *mtuFinder) max() protocol.ByteCount { + for i, v := range f.lost { + if v == protocol.InvalidByteCount { + return f.lost[i-1] + } + } + return f.lost[len(f.lost)-1] +} + +func (f *mtuFinder) Start() { + f.lastProbeTime = time.Now() // makes sure the first probe packet is not sent immediately +} + +func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { + if f.lastProbeTime.IsZero() { + return false + } + if f.inFlight != protocol.InvalidByteCount || f.done() { + return false + } + return !now.Before(f.lastProbeTime.Add(mtuProbeDelay * f.rttStats.SmoothedRTT())) +} + +func (f *mtuFinder) GetPing() (ackhandler.Frame, protocol.ByteCount) { + var size protocol.ByteCount + if f.lastProbeWasLost { + size = (f.min + f.lost[0]) / 2 + } else { + size = (f.min + f.max()) / 2 + } + f.lastProbeTime = time.Now() + f.inFlight = size + return ackhandler.Frame{ + Frame: &wire.PingFrame{}, + Handler: &mtuFinderAckHandler{f}, + }, size +} + +func (f *mtuFinder) CurrentSize() protocol.ByteCount { + return f.min +} + +type mtuFinderAckHandler struct { + *mtuFinder +} + +var _ ackhandler.FrameHandler = &mtuFinderAckHandler{} + +func (h *mtuFinderAckHandler) OnAcked(wire.Frame) { + size := h.inFlight + if size == protocol.InvalidByteCount { + panic("OnAcked callback called although there's no MTU probe packet in flight") + } + h.inFlight = protocol.InvalidByteCount + h.min = size + h.lastProbeWasLost = false + // remove all values smaller than size from the lost array + var j int + for i, v := 
range h.lost { + if size < v { + j = i + break + } + } + if j > 0 { + for i := 0; i < len(h.lost); i++ { + if i+j < len(h.lost) { + h.lost[i] = h.lost[i+j] + } else { + h.lost[i] = protocol.InvalidByteCount + } + } + } + if h.tracer != nil && h.tracer.UpdatedMTU != nil { + h.tracer.UpdatedMTU(size, h.done()) + } + h.mtuIncreased(size) +} + +func (h *mtuFinderAckHandler) OnLost(wire.Frame) { + size := h.inFlight + if size == protocol.InvalidByteCount { + panic("OnLost callback called although there's no MTU probe packet in flight") + } + h.lastProbeWasLost = true + h.inFlight = protocol.InvalidByteCount + for i, v := range h.lost { + if size < v { + copy(h.lost[i+1:], h.lost[i:]) + h.lost[i] = size + break + } + } +} diff --git a/vendor/github.com/quic-go/quic-go/multiplexer.go b/vendor/github.com/quic-go/quic-go/multiplexer.go new file mode 100644 index 00000000..85f7f403 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/multiplexer.go @@ -0,0 +1,75 @@ +package quic + +import ( + "fmt" + "net" + "sync" + + "github.com/quic-go/quic-go/internal/utils" +) + +var ( + connMuxerOnce sync.Once + connMuxer multiplexer +) + +type indexableConn interface{ LocalAddr() net.Addr } + +type multiplexer interface { + AddConn(conn indexableConn) + RemoveConn(indexableConn) error +} + +// The connMultiplexer listens on multiple net.PacketConns and dispatches +// incoming packets to the connection handler. +type connMultiplexer struct { + mutex sync.Mutex + + conns map[string] /* LocalAddr().String() */ indexableConn + logger utils.Logger +} + +var _ multiplexer = &connMultiplexer{} + +func getMultiplexer() multiplexer { + connMuxerOnce.Do(func() { + connMuxer = &connMultiplexer{ + conns: make(map[string]indexableConn), + logger: utils.DefaultLogger.WithPrefix("muxer"), + } + }) + return connMuxer +} + +func (m *connMultiplexer) index(addr net.Addr) string { + return addr.Network() + " " + addr.String() +} + +func (m *connMultiplexer) AddConn(c indexableConn) { + m.mutex.Lock() + defer m.mutex.Unlock() + + connIndex := m.index(c.LocalAddr()) + _, ok := m.conns[connIndex] + if ok { + // Panics if we're already listening on this connection. + // This is a safeguard because we're introducing a breaking API change, see + // https://github.com/quic-go/quic-go/issues/3727 for details. + // We'll remove this at a later time, when most users of the library have made the switch. + panic("connection already exists") // TODO: write a nice message + } + m.conns[connIndex] = c +} + +func (m *connMultiplexer) RemoveConn(c indexableConn) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + connIndex := m.index(c.LocalAddr()) + if _, ok := m.conns[connIndex]; !ok { + return fmt.Errorf("cannot remove connection, connection is unknown") + } + + delete(m.conns, connIndex) + return nil +} diff --git a/vendor/github.com/quic-go/quic-go/oss-fuzz.sh b/vendor/github.com/quic-go/quic-go/oss-fuzz.sh new file mode 100644 index 00000000..92a57a2c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/oss-fuzz.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Install Go manually, since oss-fuzz ships with an outdated Go version. +# See https://github.com/google/oss-fuzz/pull/10643.
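Stripped of the loss-tolerance bookkeeping (the lost array), the search described in the long comment above is a binary search over packet sizes: acknowledged probes raise min, lost probes lower the effective max, and the search stops once the interval is within maxMTUDiff. A self-contained simulation of just that core loop, with a made-up true path MTU standing in for the network:

```go
package main

import "fmt"

func main() {
	const trueMTU = 1425 // what the simulated path actually supports
	const maxDiff = 20   // stop once the interval is this narrow

	min, max := 1252, 1452 // initial search interval
	for max-min > maxDiff {
		probe := (min + max) / 2
		if probe <= trueMTU { // probe acknowledged
			min = probe
		} else { // probe lost; in this simulation loss always means "too big"
			max = probe
		}
		fmt.Printf("probe %d -> interval [%d, %d]\n", probe, min, max)
	}
	fmt.Println("using MTU:", min)
}
```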
+export CXX="${CXX} -lresolv" # required by Go 1.20 +wget https://go.dev/dl/go1.23.0.linux-amd64.tar.gz \ + && mkdir temp-go \ + && rm -rf /root/.go/* \ + && tar -C temp-go/ -xzf go1.23.0.linux-amd64.tar.gz \ + && mv temp-go/go/* /root/.go/ \ + && rm -rf temp-go go1.23.0.linux-amd64.tar.gz + +( +# fuzz qpack +compile_go_fuzzer github.com/quic-go/qpack/fuzzing Fuzz qpack_fuzzer +) + +( +# fuzz quic-go +compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/frames Fuzz frame_fuzzer +compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/header Fuzz header_fuzzer +compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/transportparameters Fuzz transportparameter_fuzzer +compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/tokens Fuzz token_fuzzer +compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/handshake Fuzz handshake_fuzzer + +if [ $SANITIZER == "coverage" ]; then + # no need for corpora if coverage + exit 0 +fi + +# generate seed corpora +cd $GOPATH/src/github.com/quic-go/quic-go/ +go generate -x ./fuzzing/... + +zip --quiet -r $OUT/header_fuzzer_seed_corpus.zip fuzzing/header/corpus +zip --quiet -r $OUT/frame_fuzzer_seed_corpus.zip fuzzing/frames/corpus +zip --quiet -r $OUT/transportparameter_fuzzer_seed_corpus.zip fuzzing/transportparameters/corpus +zip --quiet -r $OUT/handshake_fuzzer_seed_corpus.zip fuzzing/handshake/corpus +) + +# for debugging +ls -al $OUT diff --git a/vendor/github.com/quic-go/quic-go/packet_handler_map.go b/vendor/github.com/quic-go/quic-go/packet_handler_map.go new file mode 100644 index 00000000..7840202c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/packet_handler_map.go @@ -0,0 +1,255 @@ +package quic + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "hash" + "io" + "net" + "sync" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +type connCapabilities struct { + // This connection has the Don't Fragment (DF) bit set. + // This means it makes sense to run DPLPMTUD. + DF bool + // GSO (Generic Segmentation Offload) supported + GSO bool + // ECN (Explicit Congestion Notifications) supported + ECN bool +} + +// rawConn is a connection that allows reading of a receivedPacket. +type rawConn interface { + ReadPacket() (receivedPacket, error) + // WritePacket writes a packet on the wire. + // gsoSize is the size of a single packet, or 0 to disable GSO. + // It is invalid to set gsoSize if capabilities.GSO is not set.
+ WritePacket(b []byte, addr net.Addr, packetInfoOOB []byte, gsoSize uint16, ecn protocol.ECN) (int, error) + LocalAddr() net.Addr + SetReadDeadline(time.Time) error + io.Closer + + capabilities() connCapabilities +} + +type closePacket struct { + payload []byte + addr net.Addr + info packetInfo +} + +type packetHandlerMap struct { + mutex sync.Mutex + handlers map[protocol.ConnectionID]packetHandler + resetTokens map[protocol.StatelessResetToken] /* stateless reset token */ packetHandler + + closed bool + closeChan chan struct{} + + enqueueClosePacket func(closePacket) + + deleteRetiredConnsAfter time.Duration + + statelessResetMutex sync.Mutex + statelessResetHasher hash.Hash + + logger utils.Logger +} + +var _ packetHandlerManager = &packetHandlerMap{} + +func newPacketHandlerMap(key *StatelessResetKey, enqueueClosePacket func(closePacket), logger utils.Logger) *packetHandlerMap { + h := &packetHandlerMap{ + closeChan: make(chan struct{}), + handlers: make(map[protocol.ConnectionID]packetHandler), + resetTokens: make(map[protocol.StatelessResetToken]packetHandler), + deleteRetiredConnsAfter: protocol.RetiredConnectionIDDeleteTimeout, + enqueueClosePacket: enqueueClosePacket, + logger: logger, + } + if key != nil { + h.statelessResetHasher = hmac.New(sha256.New, key[:]) + } + if h.logger.Debug() { + go h.logUsage() + } + return h +} + +func (h *packetHandlerMap) logUsage() { + ticker := time.NewTicker(2 * time.Second) + var printedZero bool + for { + select { + case <-h.closeChan: + return + case <-ticker.C: + } + + h.mutex.Lock() + numHandlers := len(h.handlers) + numTokens := len(h.resetTokens) + h.mutex.Unlock() + // If the number of tracked handlers and tokens is zero, only print it a single time. + hasZero := numHandlers == 0 && numTokens == 0 + if !hasZero || (hasZero && !printedZero) { + h.logger.Debugf("Tracking %d connection IDs and %d reset tokens.\n", numHandlers, numTokens) + printedZero = false + if hasZero { + printedZero = true + } + } + } +} + +func (h *packetHandlerMap) Get(id protocol.ConnectionID) (packetHandler, bool) { + h.mutex.Lock() + defer h.mutex.Unlock() + + handler, ok := h.handlers[id] + return handler, ok +} + +func (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler) bool /* was added */ { + h.mutex.Lock() + defer h.mutex.Unlock() + + if _, ok := h.handlers[id]; ok { + h.logger.Debugf("Not adding connection ID %s, as it already exists.", id) + return false + } + h.handlers[id] = handler + h.logger.Debugf("Adding connection ID %s.", id) + return true +} + +func (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.ConnectionID, handler packetHandler) bool { + h.mutex.Lock() + defer h.mutex.Unlock() + + if _, ok := h.handlers[clientDestConnID]; ok { + h.logger.Debugf("Not adding connection ID %s for a new connection, as it already exists.", clientDestConnID) + return false + } + h.handlers[clientDestConnID] = handler + h.handlers[newConnID] = handler + h.logger.Debugf("Adding connection IDs %s and %s for a new connection.", clientDestConnID, newConnID) + return true +} + +func (h *packetHandlerMap) Remove(id protocol.ConnectionID) { + h.mutex.Lock() + delete(h.handlers, id) + h.mutex.Unlock() + h.logger.Debugf("Removing connection ID %s.", id) +} + +func (h *packetHandlerMap) Retire(id protocol.ConnectionID) { + h.logger.Debugf("Retiring connection ID %s in %s.", id, h.deleteRetiredConnsAfter) + time.AfterFunc(h.deleteRetiredConnsAfter, func() { + h.mutex.Lock() + delete(h.handlers, id) + h.mutex.Unlock() + 
h.logger.Debugf("Removing connection ID %s after it has been retired.", id) + }) +} + +// ReplaceWithClosed is called when a connection is closed. +// Depending on which side closed the connection, we need to: +// * remote close: absorb delayed packets +// * local close: retransmit the CONNECTION_CLOSE packet, in case it was lost +func (h *packetHandlerMap) ReplaceWithClosed(ids []protocol.ConnectionID, connClosePacket []byte) { + var handler packetHandler + if connClosePacket != nil { + handler = newClosedLocalConn( + func(addr net.Addr, info packetInfo) { + h.enqueueClosePacket(closePacket{payload: connClosePacket, addr: addr, info: info}) + }, + h.logger, + ) + } else { + handler = newClosedRemoteConn() + } + + h.mutex.Lock() + for _, id := range ids { + h.handlers[id] = handler + } + h.mutex.Unlock() + h.logger.Debugf("Replacing connection for connection IDs %s with a closed connection.", ids) + + time.AfterFunc(h.deleteRetiredConnsAfter, func() { + h.mutex.Lock() + for _, id := range ids { + delete(h.handlers, id) + } + h.mutex.Unlock() + h.logger.Debugf("Removing connection IDs %s for a closed connection after it has been retired.", ids) + }) +} + +func (h *packetHandlerMap) AddResetToken(token protocol.StatelessResetToken, handler packetHandler) { + h.mutex.Lock() + h.resetTokens[token] = handler + h.mutex.Unlock() +} + +func (h *packetHandlerMap) RemoveResetToken(token protocol.StatelessResetToken) { + h.mutex.Lock() + delete(h.resetTokens, token) + h.mutex.Unlock() +} + +func (h *packetHandlerMap) GetByResetToken(token protocol.StatelessResetToken) (packetHandler, bool) { + h.mutex.Lock() + defer h.mutex.Unlock() + + handler, ok := h.resetTokens[token] + return handler, ok +} + +func (h *packetHandlerMap) Close(e error) { + h.mutex.Lock() + + if h.closed { + h.mutex.Unlock() + return + } + + close(h.closeChan) + + var wg sync.WaitGroup + for _, handler := range h.handlers { + wg.Add(1) + go func(handler packetHandler) { + handler.destroy(e) + wg.Done() + }(handler) + } + h.closed = true + h.mutex.Unlock() + wg.Wait() +} + +func (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken { + var token protocol.StatelessResetToken + if h.statelessResetHasher == nil { + // Return a random stateless reset token. + // This token will be sent in the server's transport parameters. + // By using a random token, an off-path attacker won't be able to disrupt the connection. 
+ rand.Read(token[:]) + return token + } + h.statelessResetMutex.Lock() + h.statelessResetHasher.Write(connID.Bytes()) + copy(token[:], h.statelessResetHasher.Sum(nil)) + h.statelessResetHasher.Reset() + h.statelessResetMutex.Unlock() + return token +} diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go new file mode 100644 index 00000000..8b8a03d4 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/packet_packer.go @@ -0,0 +1,914 @@ +package quic + +import ( + crand "crypto/rand" + "encoding/binary" + "errors" + "fmt" + + "golang.org/x/exp/rand" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/wire" +) + +var errNothingToPack = errors.New("nothing to pack") + +type packer interface { + PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) + PackAckOnlyPacket(maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) + AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) + MaybePackProbePacket(protocol.EncryptionLevel, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) + PackConnectionClose(*qerr.TransportError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) + PackApplicationClose(*qerr.ApplicationError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) + PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) + + SetToken([]byte) +} + +type sealer interface { + handshake.LongHeaderSealer +} + +type payload struct { + streamFrames []ackhandler.StreamFrame + frames []ackhandler.Frame + ack *wire.AckFrame + length protocol.ByteCount +} + +type longHeaderPacket struct { + header *wire.ExtendedHeader + ack *wire.AckFrame + frames []ackhandler.Frame + streamFrames []ackhandler.StreamFrame // only used for 0-RTT packets + + length protocol.ByteCount +} + +type shortHeaderPacket struct { + PacketNumber protocol.PacketNumber + Frames []ackhandler.Frame + StreamFrames []ackhandler.StreamFrame + Ack *wire.AckFrame + Length protocol.ByteCount + IsPathMTUProbePacket bool + + // used for logging + DestConnID protocol.ConnectionID + PacketNumberLen protocol.PacketNumberLen + KeyPhase protocol.KeyPhaseBit +} + +func (p *shortHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.Frames) } + +type coalescedPacket struct { + buffer *packetBuffer + longHdrPackets []*longHeaderPacket + shortHdrPacket *shortHeaderPacket +} + +// IsOnlyShortHeaderPacket says if this packet only contains a short header packet (and no long header packets). +func (p *coalescedPacket) IsOnlyShortHeaderPacket() bool { + return len(p.longHdrPackets) == 0 && p.shortHdrPacket != nil +} + +func (p *longHeaderPacket) EncryptionLevel() protocol.EncryptionLevel { + //nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data). 
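The keyed branch of GetStatelessResetToken, completed just above, derives a deterministic token: HMAC-SHA256 over the connection ID, truncated to the 16-byte token length, so the same key yields the same token across restarts. A standalone sketch of that derivation using only the standard library (the helper name and inputs are illustrative; the real code reuses one hasher under a mutex instead of allocating per call):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// statelessResetToken derives a 16-byte token from a static key and a
// connection ID, mirroring the hasher-based branch above.
func statelessResetToken(key, connID []byte) [16]byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(connID) // hash.Hash writes never return an error
	var token [16]byte
	copy(token[:], mac.Sum(nil)) // truncate the 32-byte MAC to 16 bytes
	return token
}

func main() {
	key := []byte("0123456789abcdef0123456789abcdef")
	connID := []byte{0xde, 0xad, 0xbe, 0xef}
	fmt.Printf("%x\n", statelessResetToken(key, connID))
}
```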
+ switch p.header.Type { + case protocol.PacketTypeInitial: + return protocol.EncryptionInitial + case protocol.PacketTypeHandshake: + return protocol.EncryptionHandshake + case protocol.PacketType0RTT: + return protocol.Encryption0RTT + default: + panic("can't determine encryption level") + } +} + +func (p *longHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.frames) } + +type packetNumberManager interface { + PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) + PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber +} + +type sealingManager interface { + GetInitialSealer() (handshake.LongHeaderSealer, error) + GetHandshakeSealer() (handshake.LongHeaderSealer, error) + Get0RTTSealer() (handshake.LongHeaderSealer, error) + Get1RTTSealer() (handshake.ShortHeaderSealer, error) +} + +type frameSource interface { + HasData() bool + AppendStreamFrames([]ackhandler.StreamFrame, protocol.ByteCount, protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) + AppendControlFrames([]ackhandler.Frame, protocol.ByteCount, protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) +} + +type ackFrameSource interface { + GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame +} + +type packetPacker struct { + srcConnID protocol.ConnectionID + getDestConnID func() protocol.ConnectionID + + perspective protocol.Perspective + cryptoSetup sealingManager + + initialStream *cryptoStream + handshakeStream *cryptoStream + + token []byte + + pnManager packetNumberManager + framer frameSource + acks ackFrameSource + datagramQueue *datagramQueue + retransmissionQueue *retransmissionQueue + rand rand.Rand + + numNonAckElicitingAcks int +} + +var _ packer = &packetPacker{} + +func newPacketPacker( + srcConnID protocol.ConnectionID, + getDestConnID func() protocol.ConnectionID, + initialStream, handshakeStream *cryptoStream, + packetNumberManager packetNumberManager, + retransmissionQueue *retransmissionQueue, + cryptoSetup sealingManager, + framer frameSource, + acks ackFrameSource, + datagramQueue *datagramQueue, + perspective protocol.Perspective, +) *packetPacker { + var b [8]byte + _, _ = crand.Read(b[:]) + + return &packetPacker{ + cryptoSetup: cryptoSetup, + getDestConnID: getDestConnID, + srcConnID: srcConnID, + initialStream: initialStream, + handshakeStream: handshakeStream, + retransmissionQueue: retransmissionQueue, + datagramQueue: datagramQueue, + perspective: perspective, + framer: framer, + acks: acks, + rand: *rand.New(rand.NewSource(binary.BigEndian.Uint64(b[:]))), + pnManager: packetNumberManager, + } +} + +// PackConnectionClose packs a packet that closes the connection with a transport error. +func (p *packetPacker) PackConnectionClose(e *qerr.TransportError, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { + var reason string + // don't send details of crypto errors + if !e.ErrorCode.IsCryptoError() { + reason = e.ErrorMessage + } + return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason, maxPacketSize, v) +} + +// PackApplicationClose packs a packet that closes the connection with an application error. 
+func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { + return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage, maxPacketSize, v) +} + +func (p *packetPacker) packConnectionClose( + isApplicationError bool, + errorCode uint64, + frameType uint64, + reason string, + maxPacketSize protocol.ByteCount, + v protocol.Version, +) (*coalescedPacket, error) { + var sealers [4]sealer + var hdrs [3]*wire.ExtendedHeader + var payloads [4]payload + var size protocol.ByteCount + var connID protocol.ConnectionID + var oneRTTPacketNumber protocol.PacketNumber + var oneRTTPacketNumberLen protocol.PacketNumberLen + var keyPhase protocol.KeyPhaseBit // only set for 1-RTT + var numLongHdrPackets uint8 + encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT} + for i, encLevel := range encLevels { + if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT { + continue + } + ccf := &wire.ConnectionCloseFrame{ + IsApplicationError: isApplicationError, + ErrorCode: errorCode, + FrameType: frameType, + ReasonPhrase: reason, + } + // don't send application errors in Initial or Handshake packets + if isApplicationError && (encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake) { + ccf.IsApplicationError = false + ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode) + ccf.ReasonPhrase = "" + } + pl := payload{ + frames: []ackhandler.Frame{{Frame: ccf}}, + length: ccf.Length(v), + } + + var sealer sealer + var err error + switch encLevel { + case protocol.EncryptionInitial: + sealer, err = p.cryptoSetup.GetInitialSealer() + case protocol.EncryptionHandshake: + sealer, err = p.cryptoSetup.GetHandshakeSealer() + case protocol.Encryption0RTT: + sealer, err = p.cryptoSetup.Get0RTTSealer() + case protocol.Encryption1RTT: + var s handshake.ShortHeaderSealer + s, err = p.cryptoSetup.Get1RTTSealer() + if err == nil { + keyPhase = s.KeyPhase() + } + sealer = s + } + if err == handshake.ErrKeysNotYetAvailable || err == handshake.ErrKeysDropped { + continue + } + if err != nil { + return nil, err + } + sealers[i] = sealer + var hdr *wire.ExtendedHeader + if encLevel == protocol.Encryption1RTT { + connID = p.getDestConnID() + oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, pl) + } else { + hdr = p.getLongHeader(encLevel, v) + hdrs[i] = hdr + size += p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead()) + numLongHdrPackets++ + } + payloads[i] = pl + } + buffer := getPacketBuffer() + packet := &coalescedPacket{ + buffer: buffer, + longHdrPackets: make([]*longHeaderPacket, 0, numLongHdrPackets), + } + for i, encLevel := range encLevels { + if sealers[i] == nil { + continue + } + var paddingLen protocol.ByteCount + if encLevel == protocol.EncryptionInitial { + paddingLen = p.initialPaddingLen(payloads[i].frames, size, maxPacketSize) + } + if encLevel == protocol.Encryption1RTT { + shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], paddingLen, maxPacketSize, sealers[i], false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shp + } else { + longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, 
sealers[i], v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) + } + } + return packet, nil +} + +// longHeaderPacketLength calculates the length of a serialized long header packet. +// It takes into account that packets that have a tiny payload need to be padded, +// such that len(payload) + packet number len >= 4 + AEAD overhead +func (p *packetPacker) longHeaderPacketLength(hdr *wire.ExtendedHeader, pl payload, v protocol.Version) protocol.ByteCount { + var paddingLen protocol.ByteCount + pnLen := protocol.ByteCount(hdr.PacketNumberLen) + if pl.length < 4-pnLen { + paddingLen = 4 - pnLen - pl.length + } + return hdr.GetLength(v) + pl.length + paddingLen +} + +// shortHeaderPacketLength calculates the length of a serialized short header packet. +// It takes into account that packets that have a tiny payload need to be padded, +// such that len(payload) + packet number len >= 4 + AEAD overhead +func (p *packetPacker) shortHeaderPacketLength(connID protocol.ConnectionID, pnLen protocol.PacketNumberLen, pl payload) protocol.ByteCount { + var paddingLen protocol.ByteCount + if pl.length < 4-protocol.ByteCount(pnLen) { + paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length + } + return wire.ShortHeaderLen(connID, pnLen) + pl.length + paddingLen +} + +// size is the expected size of the packet, if no padding was applied. +func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, currentSize, maxPacketSize protocol.ByteCount) protocol.ByteCount { + // For the server, only ack-eliciting Initial packets need to be padded. + if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) { + return 0 + } + if currentSize >= maxPacketSize { + return 0 + } + return maxPacketSize - currentSize +} + +// PackCoalescedPacket packs a new packet. +// It packs an Initial / Handshake if there is data to send in these packet number spaces. +// It should only be called before the handshake is confirmed. +func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { + var ( + initialHdr, handshakeHdr, zeroRTTHdr *wire.ExtendedHeader + initialPayload, handshakePayload, zeroRTTPayload, oneRTTPayload payload + oneRTTPacketNumber protocol.PacketNumber + oneRTTPacketNumberLen protocol.PacketNumberLen + ) + // Try packing an Initial packet. + initialSealer, err := p.cryptoSetup.GetInitialSealer() + if err != nil && err != handshake.ErrKeysDropped { + return nil, err + } + var size protocol.ByteCount + if initialSealer != nil { + initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true, v) + if initialPayload.length > 0 { + size += p.longHeaderPacketLength(initialHdr, initialPayload, v) + protocol.ByteCount(initialSealer.Overhead()) + } + } + + // Add a Handshake packet. 
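Both length helpers above apply the same floor: the packet number plus the payload must cover at least 4 bytes, so a tiny payload is padded by 4 - pnLen - payloadLen bytes. A worked sketch of just that rule (the function name is illustrative):

```go
package main

import "fmt"

// paddingLen returns how many padding bytes a packet needs so that
// packet number length + payload length reaches the 4-byte minimum.
func paddingLen(pnLen, payloadLen int) int {
	if payloadLen < 4-pnLen {
		return 4 - pnLen - payloadLen
	}
	return 0
}

func main() {
	fmt.Println(paddingLen(1, 1)) // 2: a 1-byte PN with a 1-byte payload needs 2 bytes
	fmt.Println(paddingLen(2, 5)) // 0: already past the minimum
}
```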
+ var handshakeSealer sealer + if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + var err error + handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if handshakeSealer != nil { + handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0, v) + if handshakePayload.length > 0 { + s := p.longHeaderPacketLength(handshakeHdr, handshakePayload, v) + protocol.ByteCount(handshakeSealer.Overhead()) + size += s + } + } + } + + // Add a 0-RTT / 1-RTT packet. + var zeroRTTSealer sealer + var oneRTTSealer handshake.ShortHeaderSealer + var connID protocol.ConnectionID + var kp protocol.KeyPhaseBit + if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + var err error + oneRTTSealer, err = p.cryptoSetup.Get1RTTSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if err == nil { // 1-RTT + kp = oneRTTSealer.KeyPhase() + connID = p.getDestConnID() + oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen) + oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxPacketSize-size, onlyAck, size == 0, v) + if oneRTTPayload.length > 0 { + size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead()) + } + } else if p.perspective == protocol.PerspectiveClient && !onlyAck { // 0-RTT packets can't contain ACK frames + var err error + zeroRTTSealer, err = p.cryptoSetup.Get0RTTSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if zeroRTTSealer != nil { + zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxPacketSize-size, v) + if zeroRTTPayload.length > 0 { + size += p.longHeaderPacketLength(zeroRTTHdr, zeroRTTPayload, v) + protocol.ByteCount(zeroRTTSealer.Overhead()) + } + } + } + } + + if initialPayload.length == 0 && handshakePayload.length == 0 && zeroRTTPayload.length == 0 && oneRTTPayload.length == 0 { + return nil, nil + } + + buffer := getPacketBuffer() + packet := &coalescedPacket{ + buffer: buffer, + longHdrPackets: make([]*longHeaderPacket, 0, 3), + } + if initialPayload.length > 0 { + padding := p.initialPaddingLen(initialPayload.frames, size, maxPacketSize) + cont, err := p.appendLongHeaderPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, cont) + } + if handshakePayload.length > 0 { + cont, err := p.appendLongHeaderPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, cont) + } + if zeroRTTPayload.length > 0 { + longHdrPacket, err := p.appendLongHeaderPacket(buffer, zeroRTTHdr, zeroRTTPayload, 0, protocol.Encryption0RTT, zeroRTTSealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) + } else if oneRTTPayload.length > 0 { + shp, err := p.appendShortHeaderPacket(buffer, connID, 
oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, maxPacketSize, oneRTTSealer, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shp + } + return packet, nil +} + +// PackAckOnlyPacket packs a packet containing only an ACK in the application data packet number space. +// It should be called after the handshake is confirmed. +func (p *packetPacker) PackAckOnlyPacket(maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { + buf := getPacketBuffer() + packet, err := p.appendPacket(buf, true, maxPacketSize, v) + return packet, buf, err +} + +// AppendPacket packs a packet in the application data packet number space. +// It should be called after the handshake is confirmed. +func (p *packetPacker) AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) { + return p.appendPacket(buf, false, maxPacketSize, v) +} + +func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) { + sealer, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return shortHeaderPacket{}, err + } + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + connID := p.getDestConnID() + hdrLen := wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, true, v) + if pl.length == 0 { + return shortHeaderPacket{}, errNothingToPack + } + kp := sealer.KeyPhase() + + return p.appendShortHeaderPacket(buf, connID, pn, pnLen, kp, pl, 0, maxPacketSize, sealer, false, v) +} + +func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool, v protocol.Version) (*wire.ExtendedHeader, payload) { + if onlyAck { + if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { + return p.getLongHeader(encLevel, v), payload{ + ack: ack, + length: ack.Length(v), + } + } + return nil, payload{} + } + + var s *cryptoStream + var handler ackhandler.FrameHandler + var hasRetransmission bool + //nolint:exhaustive // Initial and Handshake are the only two encryption levels here. 
+ switch encLevel { + case protocol.EncryptionInitial: + s = p.initialStream + handler = p.retransmissionQueue.InitialAckHandler() + hasRetransmission = p.retransmissionQueue.HasInitialData() + case protocol.EncryptionHandshake: + s = p.handshakeStream + handler = p.retransmissionQueue.HandshakeAckHandler() + hasRetransmission = p.retransmissionQueue.HasHandshakeData() + } + + hasData := s.HasData() + var ack *wire.AckFrame + if ackAllowed { + ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData) + } + if !hasData && !hasRetransmission && ack == nil { + // nothing to send + return nil, payload{} + } + + var pl payload + if ack != nil { + pl.ack = ack + pl.length = ack.Length(v) + maxPacketSize -= pl.length + } + hdr := p.getLongHeader(encLevel, v) + maxPacketSize -= hdr.GetLength(v) + if hasRetransmission { + for { + var f ackhandler.Frame + //nolint:exhaustive // 0-RTT packets can't contain any retransmissions. + switch encLevel { + case protocol.EncryptionInitial: + f.Frame = p.retransmissionQueue.GetInitialFrame(maxPacketSize, v) + f.Handler = p.retransmissionQueue.InitialAckHandler() + case protocol.EncryptionHandshake: + f.Frame = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize, v) + f.Handler = p.retransmissionQueue.HandshakeAckHandler() + } + if f.Frame == nil { + break + } + pl.frames = append(pl.frames, f) + frameLen := f.Frame.Length(v) + pl.length += frameLen + maxPacketSize -= frameLen + } + } else if s.HasData() { + cf := s.PopCryptoFrame(maxPacketSize) + pl.frames = []ackhandler.Frame{{Frame: cf, Handler: handler}} + pl.length += cf.Length(v) + } + return hdr, pl +} + +func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount, v protocol.Version) (*wire.ExtendedHeader, payload) { + if p.perspective != protocol.PerspectiveClient { + return nil, payload{} + } + + hdr := p.getLongHeader(protocol.Encryption0RTT, v) + maxPayloadSize := maxPacketSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead()) + return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, v) +} + +func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, hdrLen protocol.ByteCount, maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { + maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead()) + return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, v) +} + +func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { + pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, v) + + // check if we have anything to send + if len(pl.frames) == 0 && len(pl.streamFrames) == 0 { + if pl.ack == nil { + return payload{} + } + // the packet only contains an ACK + if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks { + ping := &wire.PingFrame{} + pl.frames = append(pl.frames, ackhandler.Frame{Frame: ping}) + pl.length += ping.Length(v) + p.numNonAckElicitingAcks = 0 + } else { + p.numNonAckElicitingAcks++ + } + } else { + p.numNonAckElicitingAcks = 0 + } + return pl +} + +func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { + if onlyAck { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { + return payload{ack: ack, length: ack.Length(v)} + } + return payload{} + } + + hasData := p.framer.HasData() + hasRetransmission := p.retransmissionQueue.HasAppData() + 
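maybeGetAppDataPacket above also bounds how many pure ACK packets go out back-to-back: once numNonAckElicitingAcks reaches protocol.MaxNonAckElicitingAcks, a PING is bundled so the packet becomes ack-eliciting and the counter resets. A self-contained sketch of that cadence; the threshold of 19 is an assumption about protocol.MaxNonAckElicitingAcks, and the rest is illustrative:

```go
package main

import "fmt"

const maxNonAckElicitingAcks = 19 // assumed value of protocol.MaxNonAckElicitingAcks

var numNonAckElicitingAcks int

// packAckOnly reports whether the next ACK-only packet should carry a PING.
func packAckOnly() bool {
	if numNonAckElicitingAcks >= maxNonAckElicitingAcks {
		numNonAckElicitingAcks = 0
		return true
	}
	numNonAckElicitingAcks++
	return false
}

func main() {
	for i := 0; i < 41; i++ {
		if packAckOnly() {
			fmt.Println("packet", i, "carries a PING")
		}
	}
}
```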
var hasAck bool + var pl payload + if ackAllowed { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { + pl.ack = ack + pl.length += ack.Length(v) + hasAck = true + } + } + + if p.datagramQueue != nil { + if f := p.datagramQueue.Peek(); f != nil { + size := f.Length(v) + if size <= maxFrameSize-pl.length { // DATAGRAM frame fits + pl.frames = append(pl.frames, ackhandler.Frame{Frame: f}) + pl.length += size + p.datagramQueue.Pop() + } else if !hasAck { + // The DATAGRAM frame doesn't fit, and the packet doesn't contain an ACK. + // Discard this frame. There's no point in retrying this in the next packet, + // as it's unlikely that the available packet size will increase. + p.datagramQueue.Pop() + } + // If the DATAGRAM frame was too large and the packet contained an ACK, we'll try to send it out later. + } + } + + if hasAck && !hasData && !hasRetransmission { + return pl + } + + if hasRetransmission { + for { + remainingLen := maxFrameSize - pl.length + if remainingLen < protocol.MinStreamFrameSize { + break + } + f := p.retransmissionQueue.GetAppDataFrame(remainingLen, v) + if f == nil { + break + } + pl.frames = append(pl.frames, ackhandler.Frame{Frame: f, Handler: p.retransmissionQueue.AppDataAckHandler()}) + pl.length += f.Length(v) + } + } + + if hasData { + var lengthAdded protocol.ByteCount + startLen := len(pl.frames) + pl.frames, lengthAdded = p.framer.AppendControlFrames(pl.frames, maxFrameSize-pl.length, v) + pl.length += lengthAdded + // add handlers for the control frames that were added + for i := startLen; i < len(pl.frames); i++ { + if pl.frames[i].Handler != nil { + continue + } + switch pl.frames[i].Frame.(type) { + case *wire.PathChallengeFrame, *wire.PathResponseFrame: + // Path probing is currently not supported, therefore we don't need to set the OnAcked callback yet. + // PATH_CHALLENGE and PATH_RESPONSE are never retransmitted. + default: + pl.frames[i].Handler = p.retransmissionQueue.AppDataAckHandler() + } + } + + pl.streamFrames, lengthAdded = p.framer.AppendStreamFrames(pl.streamFrames, maxFrameSize-pl.length, v) + pl.length += lengthAdded + } + return pl +} + +func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { + if encLevel == protocol.Encryption1RTT { + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return nil, err + } + kp := s.KeyPhase() + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetAppDataPacket(maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, v) + if pl.length == 0 { + return nil, nil + } + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + shp, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, maxPacketSize, s, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shp + return packet, nil + } + + var hdr *wire.ExtendedHeader + var pl payload + var sealer handshake.LongHeaderSealer + //nolint:exhaustive // Probe packets are never sent for 0-RTT. 
+ switch encLevel { + case protocol.EncryptionInitial: + var err error + sealer, err = p.cryptoSetup.GetInitialSealer() + if err != nil { + return nil, err + } + hdr, pl = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true, v) + case protocol.EncryptionHandshake: + var err error + sealer, err = p.cryptoSetup.GetHandshakeSealer() + if err != nil { + return nil, err + } + hdr, pl = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true, v) + default: + panic("unknown encryption level") + } + + if pl.length == 0 { + return nil, nil + } + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + size := p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead()) + var padding protocol.ByteCount + if encLevel == protocol.EncryptionInitial { + padding = p.initialPaddingLen(pl.frames, size, maxPacketSize) + } + + longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdr, pl, padding, encLevel, sealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = []*longHeaderPacket{longHdrPacket} + return packet, nil +} + +func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { + pl := payload{ + frames: []ackhandler.Frame{ping}, + length: ping.Frame.Length(v), + } + buffer := getPacketBuffer() + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return shortHeaderPacket{}, nil, err + } + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + padding := size - p.shortHeaderPacketLength(connID, pnLen, pl) - protocol.ByteCount(s.Overhead()) + kp := s.KeyPhase() + packet, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, padding, size, s, true, v) + return packet, buffer, err +} + +func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel, v protocol.Version) *wire.ExtendedHeader { + pn, pnLen := p.pnManager.PeekPacketNumber(encLevel) + hdr := &wire.ExtendedHeader{ + PacketNumber: pn, + PacketNumberLen: pnLen, + } + hdr.Version = v + hdr.SrcConnectionID = p.srcConnID + hdr.DestConnectionID = p.getDestConnID() + + //nolint:exhaustive // 1-RTT packets are not long header packets. 
+ switch encLevel { + case protocol.EncryptionInitial: + hdr.Type = protocol.PacketTypeInitial + hdr.Token = p.token + case protocol.EncryptionHandshake: + hdr.Type = protocol.PacketTypeHandshake + case protocol.Encryption0RTT: + hdr.Type = protocol.PacketType0RTT + } + return hdr +} + +func (p *packetPacker) appendLongHeaderPacket(buffer *packetBuffer, header *wire.ExtendedHeader, pl payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, v protocol.Version) (*longHeaderPacket, error) { + var paddingLen protocol.ByteCount + pnLen := protocol.ByteCount(header.PacketNumberLen) + if pl.length < 4-pnLen { + paddingLen = 4 - pnLen - pl.length + } + paddingLen += padding + header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + pl.length + paddingLen + + startLen := len(buffer.Data) + raw := buffer.Data[startLen:] + raw, err := header.Append(raw, v) + if err != nil { + return nil, err + } + payloadOffset := protocol.ByteCount(len(raw)) + + raw, err = p.appendPacketPayload(raw, pl, paddingLen, v) + if err != nil { + return nil, err + } + raw = p.encryptPacket(raw, sealer, header.PacketNumber, payloadOffset, pnLen) + buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)] + + if pn := p.pnManager.PopPacketNumber(encLevel); pn != header.PacketNumber { + return nil, fmt.Errorf("packetPacker BUG: Peeked and Popped packet numbers do not match: expected %d, got %d", pn, header.PacketNumber) + } + return &longHeaderPacket{ + header: header, + ack: pl.ack, + frames: pl.frames, + streamFrames: pl.streamFrames, + length: protocol.ByteCount(len(raw)), + }, nil +} + +func (p *packetPacker) appendShortHeaderPacket( + buffer *packetBuffer, + connID protocol.ConnectionID, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp protocol.KeyPhaseBit, + pl payload, + padding, maxPacketSize protocol.ByteCount, + sealer sealer, + isMTUProbePacket bool, + v protocol.Version, +) (shortHeaderPacket, error) { + var paddingLen protocol.ByteCount + if pl.length < 4-protocol.ByteCount(pnLen) { + paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length + } + paddingLen += padding + + startLen := len(buffer.Data) + raw := buffer.Data[startLen:] + raw, err := wire.AppendShortHeader(raw, connID, pn, pnLen, kp) + if err != nil { + return shortHeaderPacket{}, err + } + payloadOffset := protocol.ByteCount(len(raw)) + + raw, err = p.appendPacketPayload(raw, pl, paddingLen, v) + if err != nil { + return shortHeaderPacket{}, err + } + if !isMTUProbePacket { + if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > maxPacketSize { + return shortHeaderPacket{}, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, maxPacketSize) + } + } + raw = p.encryptPacket(raw, sealer, pn, payloadOffset, protocol.ByteCount(pnLen)) + buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)] + + if newPN := p.pnManager.PopPacketNumber(protocol.Encryption1RTT); newPN != pn { + return shortHeaderPacket{}, fmt.Errorf("packetPacker BUG: Peeked and Popped packet numbers do not match: expected %d, got %d", pn, newPN) + } + return shortHeaderPacket{ + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: kp, + StreamFrames: pl.streamFrames, + Frames: pl.frames, + Ack: pl.ack, + Length: protocol.ByteCount(len(raw)), + DestConnID: connID, + IsPathMTUProbePacket: isMTUProbePacket, + }, nil +} + +// appendPacketPayload serializes the payload of a packet into the raw byte slice. +// It modifies the order of payload.frames. 
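+// The serialized layout is: the ACK (if any), PADDING, the shuffled control
+// frames, and finally the STREAM frames.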
+func (p *packetPacker) appendPacketPayload(raw []byte, pl payload, paddingLen protocol.ByteCount, v protocol.Version) ([]byte, error) { + payloadOffset := len(raw) + if pl.ack != nil { + var err error + raw, err = pl.ack.Append(raw, v) + if err != nil { + return nil, err + } + } + if paddingLen > 0 { + raw = append(raw, make([]byte, paddingLen)...) + } + // Randomize the order of the control frames. + // This makes sure that the receiver doesn't rely on the order in which frames are packed. + if len(pl.frames) > 1 { + p.rand.Shuffle(len(pl.frames), func(i, j int) { pl.frames[i], pl.frames[j] = pl.frames[j], pl.frames[i] }) + } + for _, f := range pl.frames { + var err error + raw, err = f.Frame.Append(raw, v) + if err != nil { + return nil, err + } + } + for _, f := range pl.streamFrames { + var err error + raw, err = f.Frame.Append(raw, v) + if err != nil { + return nil, err + } + } + + if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != pl.length { + return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", pl.length, payloadSize) + } + return raw, nil +} + +func (p *packetPacker) encryptPacket(raw []byte, sealer sealer, pn protocol.PacketNumber, payloadOffset, pnLen protocol.ByteCount) []byte { + _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], pn, raw[:payloadOffset]) + raw = raw[:len(raw)+sealer.Overhead()] + // apply header protection + pnOffset := payloadOffset - pnLen + sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[0], raw[pnOffset:payloadOffset]) + return raw +} + +func (p *packetPacker) SetToken(token []byte) { + p.token = token +} diff --git a/vendor/github.com/quic-go/quic-go/packet_unpacker.go b/vendor/github.com/quic-go/quic-go/packet_unpacker.go new file mode 100644 index 00000000..9e0fa9d9 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/packet_unpacker.go @@ -0,0 +1,223 @@ +package quic + +import ( + "fmt" + "time" + + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/wire" +) + +type headerDecryptor interface { + DecryptHeader(sample []byte, firstByte *byte, pnBytes []byte) +} + +type headerParseError struct { + err error +} + +func (e *headerParseError) Unwrap() error { + return e.err +} + +func (e *headerParseError) Error() string { + return e.err.Error() +} + +type unpackedPacket struct { + hdr *wire.ExtendedHeader + encryptionLevel protocol.EncryptionLevel + data []byte +} + +// The packetUnpacker unpacks QUIC packets. +type packetUnpacker struct { + cs handshake.CryptoSetup + + shortHdrConnIDLen int +} + +var _ unpacker = &packetUnpacker{} + +func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetUnpacker { + return &packetUnpacker{ + cs: cs, + shortHdrConnIDLen: shortHdrConnIDLen, + } +} + +// UnpackLongHeader unpacks a Long Header packet. +// If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits. +// If any other error occurred when parsing the header, the error is of type headerParseError. +// If decrypting the payload fails for any reason, the error is the error returned by the AEAD. +func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error) { + var encLevel protocol.EncryptionLevel + var extHdr *wire.ExtendedHeader + var decrypted []byte + //nolint:exhaustive // Retry packets can't be unpacked. 
+ switch hdr.Type { + case protocol.PacketTypeInitial: + encLevel = protocol.EncryptionInitial + opener, err := u.cs.GetInitialOpener() + if err != nil { + return nil, err + } + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + if err != nil { + return nil, err + } + case protocol.PacketTypeHandshake: + encLevel = protocol.EncryptionHandshake + opener, err := u.cs.GetHandshakeOpener() + if err != nil { + return nil, err + } + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + if err != nil { + return nil, err + } + case protocol.PacketType0RTT: + encLevel = protocol.Encryption0RTT + opener, err := u.cs.Get0RTTOpener() + if err != nil { + return nil, err + } + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown packet type: %s", hdr.Type) + } + + if len(decrypted) == 0 { + return nil, &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "empty packet", + } + } + + return &unpackedPacket{ + hdr: extHdr, + encryptionLevel: encLevel, + data: decrypted, + }, nil +} + +func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) { + opener, err := u.cs.Get1RTTOpener() + if err != nil { + return 0, 0, 0, nil, err + } + pn, pnLen, kp, decrypted, err := u.unpackShortHeaderPacket(opener, rcvTime, data) + if err != nil { + return 0, 0, 0, nil, err + } + if len(decrypted) == 0 { + return 0, 0, 0, nil, &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "empty packet", + } + } + return pn, pnLen, kp, decrypted, nil +} + +func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) { + extHdr, parseErr := u.unpackLongHeader(opener, hdr, data) + // If the reserved bits are set incorrectly, we still need to continue unpacking. + // This avoids a timing side-channel, which otherwise might allow an attacker + // to gain information about the header encryption. + if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { + return nil, nil, parseErr + } + extHdrLen := extHdr.ParsedLen() + extHdr.PacketNumber = opener.DecodePacketNumber(extHdr.PacketNumber, extHdr.PacketNumberLen) + decrypted, err := opener.Open(data[extHdrLen:extHdrLen], data[extHdrLen:], extHdr.PacketNumber, data[:extHdrLen]) + if err != nil { + return nil, nil, err + } + if parseErr != nil { + return nil, nil, parseErr + } + return extHdr, decrypted, nil +} + +func (u *packetUnpacker) unpackShortHeaderPacket(opener handshake.ShortHeaderOpener, rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) { + l, pn, pnLen, kp, parseErr := u.unpackShortHeader(opener, data) + // If the reserved bits are set incorrectly, we still need to continue unpacking. + // This avoids a timing side-channel, which otherwise might allow an attacker + // to gain information about the header encryption. 
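+ // The payload is therefore still decrypted below when the reserved bits are
+ // invalid; that parseErr is only surfaced after Open has run.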
+ if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { + return 0, 0, 0, nil, &headerParseError{parseErr} + } + pn = opener.DecodePacketNumber(pn, pnLen) + decrypted, err := opener.Open(data[l:l], data[l:], rcvTime, pn, kp, data[:l]) + if err != nil { + return 0, 0, 0, nil, err + } + return pn, pnLen, kp, decrypted, parseErr +} + +func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int, protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, error) { + hdrLen := 1 /* first header byte */ + u.shortHdrConnIDLen + if len(data) < hdrLen+4+16 { + return 0, 0, 0, 0, fmt.Errorf("packet too small, expected at least 20 bytes after the header, got %d", len(data)-hdrLen) + } + origPNBytes := make([]byte, 4) + copy(origPNBytes, data[hdrLen:hdrLen+4]) + // 2. decrypt the header, assuming a 4 byte packet number + hd.DecryptHeader( + data[hdrLen+4:hdrLen+4+16], + &data[0], + data[hdrLen:hdrLen+4], + ) + // 3. parse the header (and learn the actual length of the packet number) + l, pn, pnLen, kp, parseErr := wire.ParseShortHeader(data, u.shortHdrConnIDLen) + if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { + return l, pn, pnLen, kp, parseErr + } + // 4. if the packet number is shorter than 4 bytes, replace the remaining bytes with the copy we saved earlier + if pnLen != protocol.PacketNumberLen4 { + copy(data[hdrLen+int(pnLen):hdrLen+4], origPNBytes[int(pnLen):]) + } + return l, pn, pnLen, kp, parseErr +} + +// The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError. +func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { + extHdr, err := unpackLongHeader(hd, hdr, data) + if err != nil && err != wire.ErrInvalidReservedBits { + return nil, &headerParseError{err: err} + } + return extHdr, err +} + +func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { + hdrLen := hdr.ParsedLen() + if protocol.ByteCount(len(data)) < hdrLen+4+16 { + //nolint:stylecheck + return nil, fmt.Errorf("Packet too small. Expected at least 20 bytes after the header, got %d", protocol.ByteCount(len(data))-hdrLen) + } + // The packet number can be up to 4 bytes long, but we won't know the length until we decrypt it. + // 1. save a copy of the 4 bytes + origPNBytes := make([]byte, 4) + copy(origPNBytes, data[hdrLen:hdrLen+4]) + // 2. decrypt the header, assuming a 4 byte packet number + hd.DecryptHeader( + data[hdrLen+4:hdrLen+4+16], + &data[0], + data[hdrLen:hdrLen+4], + ) + // 3. parse the header (and learn the actual length of the packet number) + extHdr, parseErr := hdr.ParseExtended(data) + if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { + return nil, parseErr + } + // 4. if the packet number is shorter than 4 bytes, replace the remaining bytes with the copy we saved earlier + if extHdr.PacketNumberLen != protocol.PacketNumberLen4 { + copy(data[extHdr.ParsedLen():hdrLen+4], origPNBytes[int(extHdr.PacketNumberLen):]) + } + return extHdr, parseErr +} diff --git a/vendor/github.com/quic-go/quic-go/quicvarint/io.go b/vendor/github.com/quic-go/quic-go/quicvarint/io.go new file mode 100644 index 00000000..9368d1c5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/quicvarint/io.go @@ -0,0 +1,68 @@ +package quicvarint + +import ( + "bytes" + "io" +) + +// Reader implements both the io.ByteReader and io.Reader interfaces. 
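+// A *bytes.Reader satisfies it, as the interface assertion below documents.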
+type Reader interface {
+ io.ByteReader
+ io.Reader
+}
+
+var _ Reader = &bytes.Reader{}
+
+type byteReader struct {
+ io.Reader
+}
+
+var _ Reader = &byteReader{}
+
+// NewReader returns a Reader for r.
+// If r already implements both io.ByteReader and io.Reader, NewReader returns r.
+// Otherwise, r is wrapped to add the missing interfaces.
+func NewReader(r io.Reader) Reader {
+ if r, ok := r.(Reader); ok {
+ return r
+ }
+ return &byteReader{r}
+}
+
+func (r *byteReader) ReadByte() (byte, error) {
+ var b [1]byte
+ n, err := r.Reader.Read(b[:])
+ if n == 1 && err == io.EOF {
+ err = nil
+ }
+ return b[0], err
+}
+
+// Writer implements both the io.ByteWriter and io.Writer interfaces.
+type Writer interface {
+ io.ByteWriter
+ io.Writer
+}
+
+var _ Writer = &bytes.Buffer{}
+
+type byteWriter struct {
+ io.Writer
+}
+
+var _ Writer = &byteWriter{}
+
+// NewWriter returns a Writer for w.
+// If w already implements both io.ByteWriter and io.Writer, NewWriter returns w.
+// Otherwise, w is wrapped to add the missing interfaces.
+func NewWriter(w io.Writer) Writer {
+ if w, ok := w.(Writer); ok {
+ return w
+ }
+ return &byteWriter{w}
+}
+
+func (w *byteWriter) WriteByte(c byte) error {
+ _, err := w.Writer.Write([]byte{c})
+ return err
+}
diff --git a/vendor/github.com/quic-go/quic-go/quicvarint/varint.go b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go
new file mode 100644
index 00000000..9a22e334
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go
@@ -0,0 +1,164 @@
+package quicvarint
+
+import (
+ "fmt"
+ "io"
+)
+
+// taken from the QUIC draft
+const (
+ // Min is the minimum value allowed for a QUIC varint.
+ Min = 0
+
+ // Max is the maximum allowed value for a QUIC varint (2^62-1).
+ Max = maxVarInt8
+
+ maxVarInt1 = 63
+ maxVarInt2 = 16383
+ maxVarInt4 = 1073741823
+ maxVarInt8 = 4611686018427387903
+)
+
+// Read reads a number in the QUIC varint format from r.
+func Read(r io.ByteReader) (uint64, error) {
+ firstByte, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ // the first two bits of the first byte encode the length
+ l := 1 << ((firstByte & 0xc0) >> 6)
+ b1 := firstByte & (0xff - 0xc0)
+ if l == 1 {
+ return uint64(b1), nil
+ }
+ b2, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ if l == 2 {
+ return uint64(b2) + uint64(b1)<<8, nil
+ }
+ b3, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ b4, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ if l == 4 {
+ return uint64(b4) + uint64(b3)<<8 + uint64(b2)<<16 + uint64(b1)<<24, nil
+ }
+ b5, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ b6, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ b7, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ b8, err := r.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(b8) + uint64(b7)<<8 + uint64(b6)<<16 + uint64(b5)<<24 + uint64(b4)<<32 + uint64(b3)<<40 + uint64(b2)<<48 + uint64(b1)<<56, nil
+}
+
+// Parse reads a number in the QUIC varint format.
+// It returns the number of bytes consumed.
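+// For example, Parse([]byte{0x40, 0x25}) returns (37, 2, nil): the two most
+// significant bits of the first byte (0b01) select a 2-byte encoding, and the
+// remaining 14 bits carry the value 37 (the 0x4025 example from RFC 9000).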
+func Parse(b []byte) (uint64 /* value */, int /* bytes consumed */, error) {
+ if len(b) == 0 {
+ return 0, 0, io.EOF
+ }
+ firstByte := b[0]
+ // the first two bits of the first byte encode the length
+ l := 1 << ((firstByte & 0xc0) >> 6)
+ if len(b) < l {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ b0 := firstByte & (0xff - 0xc0)
+ if l == 1 {
+ return uint64(b0), 1, nil
+ }
+ if l == 2 {
+ return uint64(b[1]) + uint64(b0)<<8, 2, nil
+ }
+ if l == 4 {
+ return uint64(b[3]) + uint64(b[2])<<8 + uint64(b[1])<<16 + uint64(b0)<<24, 4, nil
+ }
+ return uint64(b[7]) + uint64(b[6])<<8 + uint64(b[5])<<16 + uint64(b[4])<<24 + uint64(b[3])<<32 + uint64(b[2])<<40 + uint64(b[1])<<48 + uint64(b0)<<56, 8, nil
+}
+
+// Append appends i in the QUIC varint format.
+func Append(b []byte, i uint64) []byte {
+ if i <= maxVarInt1 {
+ return append(b, uint8(i))
+ }
+ if i <= maxVarInt2 {
+ return append(b, []byte{uint8(i>>8) | 0x40, uint8(i)}...)
+ }
+ if i <= maxVarInt4 {
+ return append(b, []byte{uint8(i>>24) | 0x80, uint8(i >> 16), uint8(i >> 8), uint8(i)}...)
+ }
+ if i <= maxVarInt8 {
+ return append(b, []byte{
+ uint8(i>>56) | 0xc0, uint8(i >> 48), uint8(i >> 40), uint8(i >> 32),
+ uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i),
+ }...)
+ }
+ panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i))
+}
+
+// AppendWithLen appends i in the QUIC varint format with the desired length.
+func AppendWithLen(b []byte, i uint64, length int) []byte {
+ if length != 1 && length != 2 && length != 4 && length != 8 {
+ panic("invalid varint length")
+ }
+ l := Len(i)
+ if l == length {
+ return Append(b, i)
+ }
+ if l > length {
+ panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length))
+ }
+ if length == 2 {
+ b = append(b, 0b01000000)
+ } else if length == 4 {
+ b = append(b, 0b10000000)
+ } else if length == 8 {
+ b = append(b, 0b11000000)
+ }
+ for j := 1; j < length-l; j++ {
+ b = append(b, 0)
+ }
+ for j := 0; j < l; j++ {
+ b = append(b, uint8(i>>(8*(l-1-j))))
+ }
+ return b
+}
+
+// Len determines the number of bytes that will be needed to write the number i.
+func Len(i uint64) int {
+ if i <= maxVarInt1 {
+ return 1
+ }
+ if i <= maxVarInt2 {
+ return 2
+ }
+ if i <= maxVarInt4 {
+ return 4
+ }
+ if i <= maxVarInt8 {
+ return 8
+ }
+ // Don't use a fmt.Sprintf here to format the error message.
+ // The function would then exceed the inlining budget.
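+ // Panicking with a struct keeps this function inlinable while still
+ // carrying both the message and the offending value.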
+ panic(struct { + message string + num uint64 + }{"value doesn't fit into 62 bits: ", i}) +} diff --git a/vendor/github.com/quic-go/quic-go/receive_stream.go b/vendor/github.com/quic-go/quic-go/receive_stream.go new file mode 100644 index 00000000..80340923 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/receive_stream.go @@ -0,0 +1,377 @@ +package quic + +import ( + "fmt" + "io" + "sync" + "time" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" +) + +type receiveStreamI interface { + ReceiveStream + + handleStreamFrame(*wire.StreamFrame) error + handleResetStreamFrame(*wire.ResetStreamFrame) error + closeForShutdown(error) +} + +type receiveStream struct { + mutex sync.Mutex + + streamID protocol.StreamID + + sender streamSender + + frameQueue *frameSorter + finalOffset protocol.ByteCount + + currentFrame []byte + currentFrameDone func() + readPosInFrame int + currentFrameIsLast bool // is the currentFrame the last frame on this stream + + queuedStopSending bool + queuedMaxStreamData bool + + // Set once we read the io.EOF or the cancellation error. + // Note that for local cancellations, this doesn't necessarily mean that we know the final offset yet. + errorRead bool + completed bool // set once we've called streamSender.onStreamCompleted + cancelledRemotely bool + cancelledLocally bool + cancelErr *StreamError + closeForShutdownErr error + + readChan chan struct{} + readOnce chan struct{} // cap: 1, to protect against concurrent use of Read + deadline time.Time + + flowController flowcontrol.StreamFlowController +} + +var ( + _ ReceiveStream = &receiveStream{} + _ receiveStreamI = &receiveStream{} + _ streamControlFrameGetter = &receiveStream{} +) + +func newReceiveStream( + streamID protocol.StreamID, + sender streamSender, + flowController flowcontrol.StreamFlowController, +) *receiveStream { + return &receiveStream{ + streamID: streamID, + sender: sender, + flowController: flowController, + frameQueue: newFrameSorter(), + readChan: make(chan struct{}, 1), + readOnce: make(chan struct{}, 1), + finalOffset: protocol.MaxByteCount, + } +} + +func (s *receiveStream) StreamID() protocol.StreamID { + return s.streamID +} + +// Read implements io.Reader. It is not thread safe! +func (s *receiveStream) Read(p []byte) (int, error) { + // Concurrent use of Read is not permitted (and doesn't make any sense), + // but sometimes people do it anyway. + // Make sure that we only execute one call at any given time to avoid hard to debug failures. + s.readOnce <- struct{}{} + defer func() { <-s.readOnce }() + + s.mutex.Lock() + queuedNewControlFrame, n, err := s.readImpl(p) + completed := s.isNewlyCompleted() + s.mutex.Unlock() + + if completed { + s.sender.onStreamCompleted(s.streamID) + } + if queuedNewControlFrame { + s.sender.onHasStreamControlFrame(s.streamID, s) + } + return n, err +} + +func (s *receiveStream) isNewlyCompleted() bool { + if s.completed { + return false + } + // We need to know the final offset (either via FIN or RESET_STREAM) for flow control accounting. + if s.finalOffset == protocol.MaxByteCount { + return false + } + // We're done with the stream if it was cancelled locally... + if s.cancelledLocally { + s.completed = true + return true + } + // ... 
or if the error (either io.EOF or the reset error) was read + if s.errorRead { + s.completed = true + return true + } + return false +} + +func (s *receiveStream) readImpl(p []byte) (bool, int, error) { + if s.currentFrameIsLast && s.currentFrame == nil { + s.errorRead = true + return false, 0, io.EOF + } + if s.cancelledRemotely || s.cancelledLocally { + s.errorRead = true + return false, 0, s.cancelErr + } + if s.closeForShutdownErr != nil { + return false, 0, s.closeForShutdownErr + } + + var queuedNewControlFrame bool + var bytesRead int + var deadlineTimer *utils.Timer + for bytesRead < len(p) { + if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) { + s.dequeueNextFrame() + } + if s.currentFrame == nil && bytesRead > 0 { + return queuedNewControlFrame, bytesRead, s.closeForShutdownErr + } + + for { + // Stop waiting on errors + if s.closeForShutdownErr != nil { + return queuedNewControlFrame, bytesRead, s.closeForShutdownErr + } + if s.cancelledRemotely || s.cancelledLocally { + s.errorRead = true + return queuedNewControlFrame, 0, s.cancelErr + } + + deadline := s.deadline + if !deadline.IsZero() { + if !time.Now().Before(deadline) { + return queuedNewControlFrame, bytesRead, errDeadline + } + if deadlineTimer == nil { + deadlineTimer = utils.NewTimer() + defer deadlineTimer.Stop() + } + deadlineTimer.Reset(deadline) + } + + if s.currentFrame != nil || s.currentFrameIsLast { + break + } + + s.mutex.Unlock() + if deadline.IsZero() { + <-s.readChan + } else { + select { + case <-s.readChan: + case <-deadlineTimer.Chan(): + deadlineTimer.SetRead() + } + } + s.mutex.Lock() + if s.currentFrame == nil { + s.dequeueNextFrame() + } + } + + if bytesRead > len(p) { + return queuedNewControlFrame, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) + } + if s.readPosInFrame > len(s.currentFrame) { + return queuedNewControlFrame, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) + } + + m := copy(p[bytesRead:], s.currentFrame[s.readPosInFrame:]) + s.readPosInFrame += m + bytesRead += m + + // when a RESET_STREAM was received, the flow controller was already + // informed about the final byteOffset for this stream + if !s.cancelledRemotely { + if queueMaxStreamData := s.flowController.AddBytesRead(protocol.ByteCount(m)); queueMaxStreamData { + s.queuedMaxStreamData = true + queuedNewControlFrame = true + } + } + + if s.readPosInFrame >= len(s.currentFrame) && s.currentFrameIsLast { + s.currentFrame = nil + if s.currentFrameDone != nil { + s.currentFrameDone() + } + s.errorRead = true + return queuedNewControlFrame, bytesRead, io.EOF + } + } + return queuedNewControlFrame, bytesRead, nil +} + +func (s *receiveStream) dequeueNextFrame() { + var offset protocol.ByteCount + // We're done with the last frame. Release the buffer. 
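+ // The done callback returned by the previous frameQueue.Pop hands the
+ // buffer back for reuse.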
+ if s.currentFrameDone != nil {
+ s.currentFrameDone()
+ }
+ offset, s.currentFrame, s.currentFrameDone = s.frameQueue.Pop()
+ s.currentFrameIsLast = offset+protocol.ByteCount(len(s.currentFrame)) >= s.finalOffset
+ s.readPosInFrame = 0
+}
+
+func (s *receiveStream) CancelRead(errorCode StreamErrorCode) {
+ s.mutex.Lock()
+ queuedNewControlFrame := s.cancelReadImpl(errorCode)
+ completed := s.isNewlyCompleted()
+ s.mutex.Unlock()
+
+ if queuedNewControlFrame {
+ s.sender.onHasStreamControlFrame(s.streamID, s)
+ }
+ if completed {
+ s.flowController.Abandon()
+ s.sender.onStreamCompleted(s.streamID)
+ }
+}
+
+func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) (queuedNewControlFrame bool) {
+ if s.cancelledLocally { // duplicate call to CancelRead
+ return false
+ }
+ s.cancelledLocally = true
+ if s.errorRead || s.cancelledRemotely {
+ return false
+ }
+ s.queuedStopSending = true
+ s.cancelErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: false}
+ s.signalRead()
+ return true
+}
+
+func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error {
+ s.mutex.Lock()
+ err := s.handleStreamFrameImpl(frame)
+ completed := s.isNewlyCompleted()
+ s.mutex.Unlock()
+
+ if completed {
+ s.flowController.Abandon()
+ s.sender.onStreamCompleted(s.streamID)
+ }
+ return err
+}
+
+func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) error {
+ maxOffset := frame.Offset + frame.DataLen()
+ if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin); err != nil {
+ return err
+ }
+ if frame.Fin {
+ s.finalOffset = maxOffset
+ }
+ if s.cancelledLocally {
+ return nil
+ }
+ if err := s.frameQueue.Push(frame.Data, frame.Offset, frame.PutBack); err != nil {
+ return err
+ }
+ s.signalRead()
+ return nil
+}
+
+func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error {
+ s.mutex.Lock()
+ err := s.handleResetStreamFrameImpl(frame)
+ completed := s.isNewlyCompleted()
+ s.mutex.Unlock()
+
+ if completed {
+ s.sender.onStreamCompleted(s.streamID)
+ }
+ return err
+}
+
+func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) error {
+ if s.closeForShutdownErr != nil {
+ return nil
+ }
+ if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil {
+ return err
+ }
+ s.finalOffset = frame.FinalSize
+
+ // ignore duplicate RESET_STREAM frames for this stream (after checking their final offset)
+ if s.cancelledRemotely {
+ return nil
+ }
+ s.flowController.Abandon()
+ // don't save the error if the RESET_STREAM frame was received after CancelRead was called
+ if s.cancelledLocally {
+ return nil
+ }
+ s.cancelledRemotely = true
+ s.cancelErr = &StreamError{StreamID: s.streamID, ErrorCode: frame.ErrorCode, Remote: true}
+ s.signalRead()
+ return nil
+}
+
+func (s *receiveStream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ if !s.queuedStopSending && !s.queuedMaxStreamData {
+ return ackhandler.Frame{}, false, false
+ }
+ if s.queuedStopSending {
+ s.queuedStopSending = false
+ return ackhandler.Frame{
+ Frame: &wire.StopSendingFrame{StreamID: s.streamID, ErrorCode: s.cancelErr.ErrorCode},
+ }, true, s.queuedMaxStreamData
+ }
+
+ s.queuedMaxStreamData = false
+ return ackhandler.Frame{
+ Frame: &wire.MaxStreamDataFrame{StreamID: s.streamID, MaximumStreamData: s.flowController.GetWindowUpdate()},
+ }, true, false
+}
+
+func (s *receiveStream) SetReadDeadline(t time.Time) error {
+ s.mutex.Lock()
+ s.deadline = t
+ s.mutex.Unlock() + s.signalRead() + return nil +} + +// CloseForShutdown closes a stream abruptly. +// It makes Read unblock (and return the error) immediately. +// The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET. +func (s *receiveStream) closeForShutdown(err error) { + s.mutex.Lock() + s.closeForShutdownErr = err + s.mutex.Unlock() + s.signalRead() +} + +// signalRead performs a non-blocking send on the readChan +func (s *receiveStream) signalRead() { + select { + case s.readChan <- struct{}{}: + default: + } +} diff --git a/vendor/github.com/quic-go/quic-go/retransmission_queue.go b/vendor/github.com/quic-go/quic-go/retransmission_queue.go new file mode 100644 index 00000000..14ae1e3b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/retransmission_queue.go @@ -0,0 +1,180 @@ +package quic + +import ( + "fmt" + + "github.com/quic-go/quic-go/internal/ackhandler" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +type retransmissionQueue struct { + initial []wire.Frame + initialCryptoData []*wire.CryptoFrame + + handshake []wire.Frame + handshakeCryptoData []*wire.CryptoFrame + + appData []wire.Frame +} + +func newRetransmissionQueue() *retransmissionQueue { + return &retransmissionQueue{} +} + +// AddPing queues a ping. +// It is used when a probe packet needs to be sent +func (q *retransmissionQueue) AddPing(encLevel protocol.EncryptionLevel) { + //nolint:exhaustive // Cannot send probe packets for 0-RTT. + switch encLevel { + case protocol.EncryptionInitial: + q.addInitial(&wire.PingFrame{}) + case protocol.EncryptionHandshake: + q.addHandshake(&wire.PingFrame{}) + case protocol.Encryption1RTT: + q.addAppData(&wire.PingFrame{}) + default: + panic("unexpected encryption level") + } +} + +func (q *retransmissionQueue) addInitial(f wire.Frame) { + if cf, ok := f.(*wire.CryptoFrame); ok { + q.initialCryptoData = append(q.initialCryptoData, cf) + return + } + q.initial = append(q.initial, f) +} + +func (q *retransmissionQueue) addHandshake(f wire.Frame) { + if cf, ok := f.(*wire.CryptoFrame); ok { + q.handshakeCryptoData = append(q.handshakeCryptoData, cf) + return + } + q.handshake = append(q.handshake, f) +} + +func (q *retransmissionQueue) HasInitialData() bool { + return len(q.initialCryptoData) > 0 || len(q.initial) > 0 +} + +func (q *retransmissionQueue) HasHandshakeData() bool { + return len(q.handshakeCryptoData) > 0 || len(q.handshake) > 0 +} + +func (q *retransmissionQueue) HasAppData() bool { + return len(q.appData) > 0 +} + +func (q *retransmissionQueue) addAppData(f wire.Frame) { + if _, ok := f.(*wire.StreamFrame); ok { + panic("STREAM frames are handled with their respective streams.") + } + q.appData = append(q.appData, f) +} + +func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount, v protocol.Version) wire.Frame { + if len(q.initialCryptoData) > 0 { + f := q.initialCryptoData[0] + newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) + if newFrame == nil && !needsSplit { // the whole frame fits + q.initialCryptoData = q.initialCryptoData[1:] + return f + } + if newFrame != nil { // frame was split. Leave the original frame in the queue. 
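+ // newFrame holds as much of the CRYPTO data as fits into maxLen;
+ // the remainder stays in the frame at the head of the queue.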
+ return newFrame + } + } + if len(q.initial) == 0 { + return nil + } + f := q.initial[0] + if f.Length(v) > maxLen { + return nil + } + q.initial = q.initial[1:] + return f +} + +func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount, v protocol.Version) wire.Frame { + if len(q.handshakeCryptoData) > 0 { + f := q.handshakeCryptoData[0] + newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) + if newFrame == nil && !needsSplit { // the whole frame fits + q.handshakeCryptoData = q.handshakeCryptoData[1:] + return f + } + if newFrame != nil { // frame was split. Leave the original frame in the queue. + return newFrame + } + } + if len(q.handshake) == 0 { + return nil + } + f := q.handshake[0] + if f.Length(v) > maxLen { + return nil + } + q.handshake = q.handshake[1:] + return f +} + +func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount, v protocol.Version) wire.Frame { + if len(q.appData) == 0 { + return nil + } + f := q.appData[0] + if f.Length(v) > maxLen { + return nil + } + q.appData = q.appData[1:] + return f +} + +func (q *retransmissionQueue) DropPackets(encLevel protocol.EncryptionLevel) { + //nolint:exhaustive // Can only drop Initial and Handshake packet number space. + switch encLevel { + case protocol.EncryptionInitial: + q.initial = nil + q.initialCryptoData = nil + case protocol.EncryptionHandshake: + q.handshake = nil + q.handshakeCryptoData = nil + default: + panic(fmt.Sprintf("unexpected encryption level: %s", encLevel)) + } +} + +func (q *retransmissionQueue) InitialAckHandler() ackhandler.FrameHandler { + return (*retransmissionQueueInitialAckHandler)(q) +} + +func (q *retransmissionQueue) HandshakeAckHandler() ackhandler.FrameHandler { + return (*retransmissionQueueHandshakeAckHandler)(q) +} + +func (q *retransmissionQueue) AppDataAckHandler() ackhandler.FrameHandler { + return (*retransmissionQueueAppDataAckHandler)(q) +} + +type retransmissionQueueInitialAckHandler retransmissionQueue + +func (q *retransmissionQueueInitialAckHandler) OnAcked(wire.Frame) {} +func (q *retransmissionQueueInitialAckHandler) OnLost(f wire.Frame) { + (*retransmissionQueue)(q).addInitial(f) +} + +type retransmissionQueueHandshakeAckHandler retransmissionQueue + +func (q *retransmissionQueueHandshakeAckHandler) OnAcked(wire.Frame) {} +func (q *retransmissionQueueHandshakeAckHandler) OnLost(f wire.Frame) { + (*retransmissionQueue)(q).addHandshake(f) +} + +type retransmissionQueueAppDataAckHandler retransmissionQueue + +func (q *retransmissionQueueAppDataAckHandler) OnAcked(wire.Frame) {} +func (q *retransmissionQueueAppDataAckHandler) OnLost(f wire.Frame) { + (*retransmissionQueue)(q).addAppData(f) +} diff --git a/vendor/github.com/quic-go/quic-go/send_conn.go b/vendor/github.com/quic-go/quic-go/send_conn.go new file mode 100644 index 00000000..498ed112 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/send_conn.go @@ -0,0 +1,103 @@ +package quic + +import ( + "net" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +// A sendConn allows sending using a simple Write() on a non-connected packet conn. +type sendConn interface { + Write(b []byte, gsoSize uint16, ecn protocol.ECN) error + Close() error + LocalAddr() net.Addr + RemoteAddr() net.Addr + + capabilities() connCapabilities +} + +type sconn struct { + rawConn + + localAddr net.Addr + remoteAddr net.Addr + + logger utils.Logger + + packetInfoOOB []byte + // If GSO enabled, and we receive a GSO error for this remote address, GSO is disabled. 
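+ // (GSO, Generic Segmentation Offload, lets a single write hand the kernel
+ // a batch of equally-sized UDP datagrams; see the fallback in Write below.)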
+ gotGSOError bool
+ // Used to catch the error sometimes returned by the first sendmsg call on Linux,
+ // see https://github.com/golang/go/issues/63322.
+ wroteFirstPacket bool
+}
+
+var _ sendConn = &sconn{}
+
+func newSendConn(c rawConn, remote net.Addr, info packetInfo, logger utils.Logger) *sconn {
+ localAddr := c.LocalAddr()
+ if info.addr.IsValid() {
+ if udpAddr, ok := localAddr.(*net.UDPAddr); ok {
+ addrCopy := *udpAddr
+ addrCopy.IP = info.addr.AsSlice()
+ localAddr = &addrCopy
+ }
+ }
+
+ oob := info.OOB()
+ // increase oob slice capacity, so we can add the UDP_SEGMENT and ECN control messages without allocating
+ l := len(oob)
+ oob = append(oob, make([]byte, 64)...)[:l]
+ return &sconn{
+ rawConn: c,
+ localAddr: localAddr,
+ remoteAddr: remote,
+ packetInfoOOB: oob,
+ logger: logger,
+ }
+}
+
+func (c *sconn) Write(p []byte, gsoSize uint16, ecn protocol.ECN) error {
+ err := c.writePacket(p, c.remoteAddr, c.packetInfoOOB, gsoSize, ecn)
+ if err != nil && isGSOError(err) {
+ // disable GSO for future calls
+ c.gotGSOError = true
+ if c.logger.Debug() {
+ c.logger.Debugf("GSO failed when sending to %s", c.remoteAddr)
+ }
+ // send out the packets one by one
+ for len(p) > 0 {
+ l := len(p)
+ if l > int(gsoSize) {
+ l = int(gsoSize)
+ }
+ if err := c.writePacket(p[:l], c.remoteAddr, c.packetInfoOOB, 0, ecn); err != nil {
+ return err
+ }
+ p = p[l:]
+ }
+ return nil
+ }
+ return err
+}
+
+func (c *sconn) writePacket(p []byte, addr net.Addr, oob []byte, gsoSize uint16, ecn protocol.ECN) error {
+ _, err := c.WritePacket(p, addr, oob, gsoSize, ecn)
+ if err != nil && !c.wroteFirstPacket && isPermissionError(err) {
+ _, err = c.WritePacket(p, addr, oob, gsoSize, ecn)
+ }
+ c.wroteFirstPacket = true
+ return err
+}
+
+func (c *sconn) capabilities() connCapabilities {
+ capabilities := c.rawConn.capabilities()
+ if capabilities.GSO {
+ capabilities.GSO = !c.gotGSOError
+ }
+ return capabilities
+}
+
+func (c *sconn) RemoteAddr() net.Addr { return c.remoteAddr }
+func (c *sconn) LocalAddr() net.Addr { return c.localAddr }
diff --git a/vendor/github.com/quic-go/quic-go/send_queue.go b/vendor/github.com/quic-go/quic-go/send_queue.go
new file mode 100644
index 00000000..bde02334
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/send_queue.go
@@ -0,0 +1,103 @@
+package quic
+
+import "github.com/quic-go/quic-go/internal/protocol"
+
+type sender interface {
+ Send(p *packetBuffer, gsoSize uint16, ecn protocol.ECN)
+ Run() error
+ WouldBlock() bool
+ Available() <-chan struct{}
+ Close()
+}
+
+type queueEntry struct {
+ buf *packetBuffer
+ gsoSize uint16
+ ecn protocol.ECN
+}
+
+type sendQueue struct {
+ queue chan queueEntry
+ closeCalled chan struct{} // closed when Close() is called
+ runStopped chan struct{} // closed when the run loop returns
+ available chan struct{}
+ conn sendConn
+}
+
+var _ sender = &sendQueue{}
+
+const sendQueueCapacity = 8
+
+func newSendQueue(conn sendConn) sender {
+ return &sendQueue{
+ conn: conn,
+ runStopped: make(chan struct{}),
+ closeCalled: make(chan struct{}),
+ available: make(chan struct{}, 1),
+ queue: make(chan queueEntry, sendQueueCapacity),
+ }
+}
+
+// Send sends out a packet. It's guaranteed to not block.
+// Callers need to make sure that there's actually space in the send queue by calling WouldBlock.
+// Otherwise Send will panic.
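+//
+// An illustrative caller on the send path (q, buf, gsoSize and ecn stand in
+// for the caller's own values) would do:
+//
+//	if q.WouldBlock() {
+//		<-q.Available() // wait until the queue has drained below capacity
+//	}
+//	q.Send(buf, gsoSize, ecn)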
+func (h *sendQueue) Send(p *packetBuffer, gsoSize uint16, ecn protocol.ECN) {
+ select {
+ case h.queue <- queueEntry{buf: p, gsoSize: gsoSize, ecn: ecn}:
+ // clear available channel if we've reached capacity
+ if len(h.queue) == sendQueueCapacity {
+ select {
+ case <-h.available:
+ default:
+ }
+ }
+ case <-h.runStopped:
+ default:
+ panic("sendQueue.Send would have blocked")
+ }
+}
+
+func (h *sendQueue) WouldBlock() bool {
+ return len(h.queue) == sendQueueCapacity
+}
+
+func (h *sendQueue) Available() <-chan struct{} {
+ return h.available
+}
+
+func (h *sendQueue) Run() error {
+ defer close(h.runStopped)
+ var shouldClose bool
+ for {
+ if shouldClose && len(h.queue) == 0 {
+ return nil
+ }
+ select {
+ case <-h.closeCalled:
+ h.closeCalled = nil // prevent this case from being selected again
+ // make sure that all queued packets are actually sent out
+ shouldClose = true
+ case e := <-h.queue:
+ if err := h.conn.Write(e.buf.Data, e.gsoSize, e.ecn); err != nil {
+ // This additional check enables:
+ // 1. checking for "datagram too large" errors from the kernel, and thus
+ // 2. Path MTU discovery, and
+ // 3. eventual detection of the loss of the MTU probe packet's PING frame.
+ if !isSendMsgSizeErr(err) {
+ return err
+ }
+ }
+ e.buf.Release()
+ select {
+ case h.available <- struct{}{}:
+ default:
+ }
+ }
+ }
+}
+
+func (h *sendQueue) Close() {
+ close(h.closeCalled)
+ // wait until the run loop returned
+ <-h.runStopped
+}
diff --git a/vendor/github.com/quic-go/quic-go/send_stream.go b/vendor/github.com/quic-go/quic-go/send_stream.go
new file mode 100644
index 00000000..bcaf2abf
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/send_stream.go
@@ -0,0 +1,593 @@
+package quic
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+)
+
+type sendStreamI interface {
+ SendStream
+ handleStopSendingFrame(*wire.StopSendingFrame)
+ hasData() bool
+ popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (frame ackhandler.StreamFrame, ok, hasMore bool)
+ closeForShutdown(error)
+ updateSendWindow(protocol.ByteCount)
+}
+
+type sendStream struct {
+ mutex sync.Mutex
+
+ numOutstandingFrames int64 // outstanding STREAM and RESET_STREAM frames
+ retransmissionQueue []*wire.StreamFrame
+
+ ctx context.Context
+ ctxCancel context.CancelCauseFunc
+
+ streamID protocol.StreamID
+ sender streamSender
+
+ writeOffset protocol.ByteCount
+
+ cancelWriteErr *StreamError
+ closeForShutdownErr error
+
+ queuedResetStreamFrame bool
+ queuedBlockedFrame bool
+
+ finishedWriting bool // set once Close() is called
+ finSent bool // set when a STREAM frame with the FIN bit has been sent
+ // Set when the application knows about the cancellation.
+ // This can happen because the application called CancelWrite,
+ // or because Write returned the error (for remote cancellations).
+ cancellationFlagged bool + completed bool // set when this stream has been reported to the streamSender as completed + + dataForWriting []byte // during a Write() call, this slice is the part of p that still needs to be sent out + nextFrame *wire.StreamFrame + + writeChan chan struct{} + writeOnce chan struct{} + deadline time.Time + + flowController flowcontrol.StreamFlowController +} + +var ( + _ SendStream = &sendStream{} + _ sendStreamI = &sendStream{} + _ streamControlFrameGetter = &sendStream{} +) + +func newSendStream( + ctx context.Context, + streamID protocol.StreamID, + sender streamSender, + flowController flowcontrol.StreamFlowController, +) *sendStream { + s := &sendStream{ + streamID: streamID, + sender: sender, + flowController: flowController, + writeChan: make(chan struct{}, 1), + writeOnce: make(chan struct{}, 1), // cap: 1, to protect against concurrent use of Write + } + s.ctx, s.ctxCancel = context.WithCancelCause(ctx) + return s +} + +func (s *sendStream) StreamID() protocol.StreamID { + return s.streamID // same for receiveStream and sendStream +} + +func (s *sendStream) Write(p []byte) (int, error) { + // Concurrent use of Write is not permitted (and doesn't make any sense), + // but sometimes people do it anyway. + // Make sure that we only execute one call at any given time to avoid hard to debug failures. + s.writeOnce <- struct{}{} + defer func() { <-s.writeOnce }() + + isNewlyCompleted, n, err := s.write(p) + if isNewlyCompleted { + s.sender.onStreamCompleted(s.streamID) + } + return n, err +} + +func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.finishedWriting { + return false, 0, fmt.Errorf("write on closed stream %d", s.streamID) + } + if s.cancelWriteErr != nil { + s.cancellationFlagged = true + return s.isNewlyCompleted(), 0, s.cancelWriteErr + } + if s.closeForShutdownErr != nil { + return false, 0, s.closeForShutdownErr + } + if !s.deadline.IsZero() && !time.Now().Before(s.deadline) { + return false, 0, errDeadline + } + if len(p) == 0 { + return false, 0, nil + } + + s.dataForWriting = p + + var ( + deadlineTimer *utils.Timer + bytesWritten int + notifiedSender bool + ) + for { + var copied bool + var deadline time.Time + // As soon as dataForWriting becomes smaller than a certain size x, we copy all the data to a STREAM frame (s.nextFrame), + // which can then be popped the next time we assemble a packet. + // This allows us to return Write() when all data but x bytes have been sent out. + // When the user now calls Close(), this is much more likely to happen before we popped that last STREAM frame, + // allowing us to set the FIN bit on that frame (instead of sending an empty STREAM frame with FIN). 
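+ // Concretely, x is protocol.MaxPacketBufferSize: canBufferStreamFrame
+ // reports whether the already-buffered frame plus the remaining data still
+ // fit into a single packet buffer.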
+ if s.canBufferStreamFrame() && len(s.dataForWriting) > 0 { + if s.nextFrame == nil { + f := wire.GetStreamFrame() + f.Offset = s.writeOffset + f.StreamID = s.streamID + f.DataLenPresent = true + f.Data = f.Data[:len(s.dataForWriting)] + copy(f.Data, s.dataForWriting) + s.nextFrame = f + } else { + l := len(s.nextFrame.Data) + s.nextFrame.Data = s.nextFrame.Data[:l+len(s.dataForWriting)] + copy(s.nextFrame.Data[l:], s.dataForWriting) + } + s.dataForWriting = nil + bytesWritten = len(p) + copied = true + } else { + bytesWritten = len(p) - len(s.dataForWriting) + deadline = s.deadline + if !deadline.IsZero() { + if !time.Now().Before(deadline) { + s.dataForWriting = nil + return false, bytesWritten, errDeadline + } + if deadlineTimer == nil { + deadlineTimer = utils.NewTimer() + defer deadlineTimer.Stop() + } + deadlineTimer.Reset(deadline) + } + if s.dataForWriting == nil || s.cancelWriteErr != nil || s.closeForShutdownErr != nil { + break + } + } + + s.mutex.Unlock() + if !notifiedSender { + s.sender.onHasStreamData(s.streamID, s) // must be called without holding the mutex + notifiedSender = true + } + if copied { + s.mutex.Lock() + break + } + if deadline.IsZero() { + <-s.writeChan + } else { + select { + case <-s.writeChan: + case <-deadlineTimer.Chan(): + deadlineTimer.SetRead() + } + } + s.mutex.Lock() + } + + if bytesWritten == len(p) { + return false, bytesWritten, nil + } + if s.closeForShutdownErr != nil { + return false, bytesWritten, s.closeForShutdownErr + } else if s.cancelWriteErr != nil { + s.cancellationFlagged = true + return s.isNewlyCompleted(), bytesWritten, s.cancelWriteErr + } + return false, bytesWritten, nil +} + +func (s *sendStream) canBufferStreamFrame() bool { + var l protocol.ByteCount + if s.nextFrame != nil { + l = s.nextFrame.DataLen() + } + return l+protocol.ByteCount(len(s.dataForWriting)) <= protocol.MaxPacketBufferSize +} + +// popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream +// maxBytes is the maximum length this frame (including frame header) will have. +func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (af ackhandler.StreamFrame, ok, hasMore bool) { + s.mutex.Lock() + f, hasMoreData, queuedControlFrame := s.popNewOrRetransmittedStreamFrame(maxBytes, v) + if f != nil { + s.numOutstandingFrames++ + } + s.mutex.Unlock() + + if queuedControlFrame { + s.sender.onHasStreamControlFrame(s.streamID, s) + } + if f == nil { + return ackhandler.StreamFrame{}, false, hasMoreData + } + return ackhandler.StreamFrame{ + Frame: f, + Handler: (*sendStreamAckHandler)(s), + }, true, hasMoreData +} + +func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (_ *wire.StreamFrame, hasMoreData, queuedControlFrame bool) { + if s.cancelWriteErr != nil || s.closeForShutdownErr != nil { + return nil, false, false + } + + if len(s.retransmissionQueue) > 0 { + f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v) + if f != nil || hasMoreRetransmissions { + if f == nil { + return nil, true, false + } + // We always claim that we have more data to send. + // This might be incorrect, in which case there'll be a spurious call to popStreamFrame in the future. 
+ return f, true, false + } + } + + if len(s.dataForWriting) == 0 && s.nextFrame == nil { + if s.finishedWriting && !s.finSent { + s.finSent = true + return &wire.StreamFrame{ + StreamID: s.streamID, + Offset: s.writeOffset, + DataLenPresent: true, + Fin: true, + }, false, false + } + return nil, false, false + } + + sendWindow := s.flowController.SendWindowSize() + if sendWindow == 0 { + if s.flowController.IsNewlyBlocked() { + s.queuedBlockedFrame = true + return nil, false, true + } + return nil, true, false + } + + f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v) + if dataLen := f.DataLen(); dataLen > 0 { + s.writeOffset += f.DataLen() + s.flowController.AddBytesSent(f.DataLen()) + } + f.Fin = s.finishedWriting && s.dataForWriting == nil && s.nextFrame == nil && !s.finSent + if f.Fin { + s.finSent = true + } + return f, hasMoreData, false +} + +func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.Version) (*wire.StreamFrame, bool) { + if s.nextFrame != nil { + nextFrame := s.nextFrame + s.nextFrame = nil + + maxDataLen := min(sendWindow, nextFrame.MaxDataLen(maxBytes, v)) + if nextFrame.DataLen() > maxDataLen { + s.nextFrame = wire.GetStreamFrame() + s.nextFrame.StreamID = s.streamID + s.nextFrame.Offset = s.writeOffset + maxDataLen + s.nextFrame.Data = s.nextFrame.Data[:nextFrame.DataLen()-maxDataLen] + s.nextFrame.DataLenPresent = true + copy(s.nextFrame.Data, nextFrame.Data[maxDataLen:]) + nextFrame.Data = nextFrame.Data[:maxDataLen] + } else { + s.signalWrite() + } + return nextFrame, s.nextFrame != nil || s.dataForWriting != nil + } + + f := wire.GetStreamFrame() + f.Fin = false + f.StreamID = s.streamID + f.Offset = s.writeOffset + f.DataLenPresent = true + f.Data = f.Data[:0] + + hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow, v) + if len(f.Data) == 0 && !f.Fin { + f.PutBack() + return nil, hasMoreData + } + return f, hasMoreData +} + +func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount, v protocol.Version) bool { + maxDataLen := f.MaxDataLen(maxBytes, v) + if maxDataLen == 0 { // a STREAM frame must have at least one byte of data + return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting + } + s.getDataForWriting(f, min(maxDataLen, sendWindow)) + + return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting +} + +func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount, v protocol.Version) (*wire.StreamFrame, bool /* has more retransmissions */) { + f := s.retransmissionQueue[0] + newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, v) + if needsSplit { + return newFrame, true + } + s.retransmissionQueue = s.retransmissionQueue[1:] + return f, len(s.retransmissionQueue) > 0 +} + +func (s *sendStream) hasData() bool { + s.mutex.Lock() + hasData := len(s.dataForWriting) > 0 + s.mutex.Unlock() + return hasData +} + +func (s *sendStream) getDataForWriting(f *wire.StreamFrame, maxBytes protocol.ByteCount) { + if protocol.ByteCount(len(s.dataForWriting)) <= maxBytes { + f.Data = f.Data[:len(s.dataForWriting)] + copy(f.Data, s.dataForWriting) + s.dataForWriting = nil + s.signalWrite() + return + } + f.Data = f.Data[:maxBytes] + copy(f.Data, s.dataForWriting) + s.dataForWriting = s.dataForWriting[maxBytes:] + if s.canBufferStreamFrame() { + s.signalWrite() + } +} + +func (s *sendStream) isNewlyCompleted() bool { + if s.completed { + return false + } + // We need to keep the stream around until all 
frames have been sent and acknowledged. + if s.numOutstandingFrames > 0 || len(s.retransmissionQueue) > 0 || s.queuedResetStreamFrame { + return false + } + // The stream is completed if we sent the FIN. + if s.finSent { + s.completed = true + return true + } + // The stream is also completed if: + // 1. the application called CancelWrite, or + // 2. we received a STOP_SENDING, and + // * the application consumed the error via Write, or + // * the application called Close + if s.cancelWriteErr != nil && (s.cancellationFlagged || s.finishedWriting) { + s.completed = true + return true + } + return false +} + +func (s *sendStream) Close() error { + s.mutex.Lock() + if s.closeForShutdownErr != nil { + s.mutex.Unlock() + return nil + } + s.finishedWriting = true + cancelWriteErr := s.cancelWriteErr + if cancelWriteErr != nil { + s.cancellationFlagged = true + } + completed := s.isNewlyCompleted() + s.mutex.Unlock() + + if completed { + s.sender.onStreamCompleted(s.streamID) + } + if cancelWriteErr != nil { + return fmt.Errorf("close called for canceled stream %d", s.streamID) + } + s.sender.onHasStreamData(s.streamID, s) // need to send the FIN, must be called without holding the mutex + + s.ctxCancel(nil) + return nil +} + +func (s *sendStream) CancelWrite(errorCode StreamErrorCode) { + s.cancelWriteImpl(errorCode, false) +} + +func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) { + s.mutex.Lock() + if !remote { + s.cancellationFlagged = true + if s.cancelWriteErr != nil { + completed := s.isNewlyCompleted() + s.mutex.Unlock() + // The user has called CancelWrite. If the previous cancellation was + // because of a STOP_SENDING, we don't need to flag the error to the + // user anymore. + if completed { + s.sender.onStreamCompleted(s.streamID) + } + return + } + } + if s.cancelWriteErr != nil { + s.mutex.Unlock() + return + } + s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} + s.ctxCancel(s.cancelWriteErr) + s.numOutstandingFrames = 0 + s.retransmissionQueue = nil + s.queuedResetStreamFrame = true + s.mutex.Unlock() + + s.signalWrite() + s.sender.onHasStreamControlFrame(s.streamID, s) +} + +func (s *sendStream) updateSendWindow(limit protocol.ByteCount) { + updated := s.flowController.UpdateSendWindow(limit) + if !updated { // duplicate or reordered MAX_STREAM_DATA frame + return + } + s.mutex.Lock() + hasStreamData := s.dataForWriting != nil || s.nextFrame != nil + s.mutex.Unlock() + if hasStreamData { + s.sender.onHasStreamData(s.streamID, s) + } +} + +func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) { + s.cancelWriteImpl(frame.ErrorCode, true) +} + +func (s *sendStream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.queuedBlockedFrame && !s.queuedResetStreamFrame { + return ackhandler.Frame{}, false, false + } + if s.queuedBlockedFrame { + s.queuedBlockedFrame = false + return ackhandler.Frame{ + Frame: &wire.StreamDataBlockedFrame{StreamID: s.streamID, MaximumStreamData: s.writeOffset}, + }, true, s.queuedResetStreamFrame + } + // RESET_STREAM frame + s.queuedResetStreamFrame = false + s.numOutstandingFrames++ + return ackhandler.Frame{ + Frame: &wire.ResetStreamFrame{ + StreamID: s.streamID, + FinalSize: s.writeOffset, + ErrorCode: s.cancelWriteErr.ErrorCode, + }, + Handler: (*sendStreamResetStreamHandler)(s), + }, true, false +} + +func (s *sendStream) Context() context.Context { + return s.ctx +} + +func (s *sendStream) 
SetWriteDeadline(t time.Time) error { + s.mutex.Lock() + s.deadline = t + s.mutex.Unlock() + s.signalWrite() + return nil +} + +// CloseForShutdown closes a stream abruptly. +// It makes Write unblock (and return the error) immediately. +// The peer will NOT be informed about this: the stream is closed without sending a FIN or RST. +func (s *sendStream) closeForShutdown(err error) { + s.mutex.Lock() + s.closeForShutdownErr = err + s.mutex.Unlock() + s.signalWrite() +} + +// signalWrite performs a non-blocking send on the writeChan +func (s *sendStream) signalWrite() { + select { + case s.writeChan <- struct{}{}: + default: + } +} + +type sendStreamAckHandler sendStream + +var _ ackhandler.FrameHandler = &sendStreamAckHandler{} + +func (s *sendStreamAckHandler) OnAcked(f wire.Frame) { + sf := f.(*wire.StreamFrame) + sf.PutBack() + s.mutex.Lock() + if s.cancelWriteErr != nil { + s.mutex.Unlock() + return + } + s.numOutstandingFrames-- + if s.numOutstandingFrames < 0 { + panic("numOutStandingFrames negative") + } + completed := (*sendStream)(s).isNewlyCompleted() + s.mutex.Unlock() + + if completed { + s.sender.onStreamCompleted(s.streamID) + } +} + +func (s *sendStreamAckHandler) OnLost(f wire.Frame) { + sf := f.(*wire.StreamFrame) + s.mutex.Lock() + if s.cancelWriteErr != nil { + s.mutex.Unlock() + return + } + sf.DataLenPresent = true + s.retransmissionQueue = append(s.retransmissionQueue, sf) + s.numOutstandingFrames-- + if s.numOutstandingFrames < 0 { + panic("numOutStandingFrames negative") + } + s.mutex.Unlock() + + s.sender.onHasStreamData(s.streamID, (*sendStream)(s)) +} + +type sendStreamResetStreamHandler sendStream + +var _ ackhandler.FrameHandler = &sendStreamResetStreamHandler{} + +func (s *sendStreamResetStreamHandler) OnAcked(wire.Frame) { + s.mutex.Lock() + s.numOutstandingFrames-- + if s.numOutstandingFrames < 0 { + panic("numOutStandingFrames negative") + } + completed := (*sendStream)(s).isNewlyCompleted() + s.mutex.Unlock() + + if completed { + s.sender.onStreamCompleted(s.streamID) + } +} + +func (s *sendStreamResetStreamHandler) OnLost(wire.Frame) { + s.mutex.Lock() + s.queuedResetStreamFrame = true + s.mutex.Unlock() + s.sender.onHasStreamControlFrame(s.streamID, (*sendStream)(s)) +} diff --git a/vendor/github.com/quic-go/quic-go/server.go b/vendor/github.com/quic-go/quic-go/server.go new file mode 100644 index 00000000..0cf45aca --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/server.go @@ -0,0 +1,925 @@ +package quic + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +// ErrServerClosed is returned by the Listener or EarlyListener's Accept method after a call to Close. 
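+// Since errServerClosed unwraps to net.ErrClosed, errors.Is(err, net.ErrClosed) matches it as well.
+// A typical accept loop might look like this (a sketch; handleConn is a hypothetical handler):
+//
+//	for {
+//		conn, err := ln.Accept(context.Background())
+//		if err != nil {
+//			if errors.Is(err, ErrServerClosed) {
+//				return
+//			}
+//			continue
+//		}
+//		go handleConn(conn)
+//	}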
+var ErrServerClosed = errServerClosed{} + +type errServerClosed struct{} + +func (errServerClosed) Error() string { return "quic: server closed" } +func (errServerClosed) Unwrap() error { return net.ErrClosed } + +// packetHandler handles packets +type packetHandler interface { + handlePacket(receivedPacket) + destroy(error) + closeWithTransportError(qerr.TransportErrorCode) +} + +type packetHandlerManager interface { + Get(protocol.ConnectionID) (packetHandler, bool) + GetByResetToken(protocol.StatelessResetToken) (packetHandler, bool) + AddWithConnID(destConnID, newConnID protocol.ConnectionID, h packetHandler) bool + Close(error) + connRunner +} + +type quicConn interface { + EarlyConnection + earlyConnReady() <-chan struct{} + handlePacket(receivedPacket) + run() error + destroy(error) + closeWithTransportError(TransportErrorCode) +} + +type zeroRTTQueue struct { + packets []receivedPacket + expiration time.Time +} + +type rejectedPacket struct { + receivedPacket + hdr *wire.Header +} + +// A Listener of QUIC +type baseServer struct { + disableVersionNegotiation bool + acceptEarlyConns bool + + tlsConf *tls.Config + config *Config + + conn rawConn + + tokenGenerator *handshake.TokenGenerator + maxTokenAge time.Duration + + connIDGenerator ConnectionIDGenerator + connHandler packetHandlerManager + onClose func() + + receivedPackets chan receivedPacket + + nextZeroRTTCleanup time.Time + zeroRTTQueues map[protocol.ConnectionID]*zeroRTTQueue // only initialized if acceptEarlyConns == true + + connContext func(context.Context) context.Context + + // set as a member, so they can be set in the tests + newConn func( + context.Context, + context.CancelCauseFunc, + sendConn, + connRunner, + protocol.ConnectionID, /* original dest connection ID */ + *protocol.ConnectionID, /* retry src connection ID */ + protocol.ConnectionID, /* client dest connection ID */ + protocol.ConnectionID, /* destination connection ID */ + protocol.ConnectionID, /* source connection ID */ + ConnectionIDGenerator, + protocol.StatelessResetToken, + *Config, + *tls.Config, + *handshake.TokenGenerator, + bool, /* client address validated by an address validation token */ + *logging.ConnectionTracer, + utils.Logger, + protocol.Version, + ) quicConn + + closeMx sync.Mutex + errorChan chan struct{} // is closed when the server is closed + closeErr error + running chan struct{} // closed as soon as run() returns + + versionNegotiationQueue chan receivedPacket + invalidTokenQueue chan rejectedPacket + connectionRefusedQueue chan rejectedPacket + retryQueue chan rejectedPacket + + verifySourceAddress func(net.Addr) bool + + connQueue chan quicConn + + tracer *logging.Tracer + + logger utils.Logger +} + +// A Listener listens for incoming QUIC connections. +// It returns connections once the handshake has completed. +type Listener struct { + baseServer *baseServer +} + +// Accept returns new connections. It should be called in a loop. +func (l *Listener) Accept(ctx context.Context) (Connection, error) { + return l.baseServer.Accept(ctx) +} + +// Close closes the listener. +// Accept will return ErrServerClosed as soon as all connections in the accept queue have been accepted. +// QUIC handshakes that are still in flight will be rejected with a CONNECTION_REFUSED error. 
+// The effect of closing the listener depends on how it was created:
+// * if it was created using Transport.Listen, already established connections will be unaffected
+// * if it was created using the Listen convenience method, all established connections will be closed immediately
+func (l *Listener) Close() error {
+ return l.baseServer.Close()
+}
+
+// Addr returns the local network address that the server is listening on.
+func (l *Listener) Addr() net.Addr {
+ return l.baseServer.Addr()
+}
+
+// An EarlyListener listens for incoming QUIC connections, and returns them before the handshake completes.
+// For connections that don't use 0-RTT, this allows the server to send 0.5-RTT data.
+// This data is encrypted with forward-secure keys; however, the client's identity has not yet been verified.
+// For connections using 0-RTT, this allows the server to accept and respond to streams that the client opened in the
+// 0-RTT data it sent. Note that at this point during the handshake, the liveness of the
+// client has not yet been confirmed, and the 0-RTT data could have been replayed by an attacker.
+type EarlyListener struct {
+ baseServer *baseServer
+}
+
+// Accept returns new connections. It should be called in a loop.
+func (l *EarlyListener) Accept(ctx context.Context) (EarlyConnection, error) {
+ return l.baseServer.accept(ctx)
+}
+
+// Close the server. All active connections will be closed.
+func (l *EarlyListener) Close() error {
+ return l.baseServer.Close()
+}
+
+// Addr returns the local network address that the server is listening on.
+func (l *EarlyListener) Addr() net.Addr {
+ return l.baseServer.Addr()
+}
+
+// ListenAddr creates a QUIC server listening on a given address.
+// See Listen for more details.
+func ListenAddr(addr string, tlsConf *tls.Config, config *Config) (*Listener, error) {
+ conn, err := listenUDP(addr)
+ if err != nil {
+ return nil, err
+ }
+ return (&Transport{
+ Conn: conn,
+ createdConn: true,
+ isSingleUse: true,
+ }).Listen(tlsConf, config)
+}
+
+// ListenAddrEarly works like ListenAddr, but it returns connections before the handshake completes.
+func ListenAddrEarly(addr string, tlsConf *tls.Config, config *Config) (*EarlyListener, error) {
+ conn, err := listenUDP(addr)
+ if err != nil {
+ return nil, err
+ }
+ return (&Transport{
+ Conn: conn,
+ createdConn: true,
+ isSingleUse: true,
+ }).ListenEarly(tlsConf, config)
+}
+
+func listenUDP(addr string) (*net.UDPConn, error) {
+ udpAddr, err := net.ResolveUDPAddr("udp", addr)
+ if err != nil {
+ return nil, err
+ }
+ return net.ListenUDP("udp", udpAddr)
+}
+
+// Listen listens for QUIC connections on a given net.PacketConn.
+// If the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn does),
+// ECN and packet info support will be enabled. In this case, ReadMsgUDP and WriteMsgUDP
+// will be used instead of ReadFrom and WriteTo to read/write packets.
+// A single net.PacketConn can only be used for a single call to Listen.
+//
+// The tls.Config must not be nil and must contain a certificate configuration.
+// Furthermore, it must define an application protocol (using NextProtos).
+// The quic.Config may be nil; in that case, the default values will be used.
+//
+// This is a convenience function. More advanced use cases should instantiate a Transport,
+// which offers configuration options for more fine-grained control of the connection establishment,
+// including reusing the underlying UDP socket for outgoing QUIC connections.
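+// A minimal usage sketch (cert is assumed to be a tls.Certificate loaded elsewhere,
+// and the ALPN value is illustrative):
+//
+//	udpConn, err := net.ListenPacket("udp4", ":4242")
+//	if err != nil {
+//		// handle the error
+//	}
+//	tlsConf := &tls.Config{
+//		Certificates: []tls.Certificate{cert},
+//		NextProtos:   []string{"my-alpn"},
+//	}
+//	ln, err := quic.Listen(udpConn, tlsConf, nil)
+//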
+// When closing a listener created with Listen, all established QUIC connections will be closed immediately. +func Listen(conn net.PacketConn, tlsConf *tls.Config, config *Config) (*Listener, error) { + tr := &Transport{Conn: conn, isSingleUse: true} + return tr.Listen(tlsConf, config) +} + +// ListenEarly works like Listen, but it returns connections before the handshake completes. +func ListenEarly(conn net.PacketConn, tlsConf *tls.Config, config *Config) (*EarlyListener, error) { + tr := &Transport{Conn: conn, isSingleUse: true} + return tr.ListenEarly(tlsConf, config) +} + +func newServer( + conn rawConn, + connHandler packetHandlerManager, + connIDGenerator ConnectionIDGenerator, + connContext func(context.Context) context.Context, + tlsConf *tls.Config, + config *Config, + tracer *logging.Tracer, + onClose func(), + tokenGeneratorKey TokenGeneratorKey, + maxTokenAge time.Duration, + verifySourceAddress func(net.Addr) bool, + disableVersionNegotiation bool, + acceptEarly bool, +) *baseServer { + s := &baseServer{ + conn: conn, + connContext: connContext, + tlsConf: tlsConf, + config: config, + tokenGenerator: handshake.NewTokenGenerator(tokenGeneratorKey), + maxTokenAge: maxTokenAge, + verifySourceAddress: verifySourceAddress, + connIDGenerator: connIDGenerator, + connHandler: connHandler, + connQueue: make(chan quicConn, protocol.MaxAcceptQueueSize), + errorChan: make(chan struct{}), + running: make(chan struct{}), + receivedPackets: make(chan receivedPacket, protocol.MaxServerUnprocessedPackets), + versionNegotiationQueue: make(chan receivedPacket, 4), + invalidTokenQueue: make(chan rejectedPacket, 4), + connectionRefusedQueue: make(chan rejectedPacket, 4), + retryQueue: make(chan rejectedPacket, 8), + newConn: newConnection, + tracer: tracer, + logger: utils.DefaultLogger.WithPrefix("server"), + acceptEarlyConns: acceptEarly, + disableVersionNegotiation: disableVersionNegotiation, + onClose: onClose, + } + if acceptEarly { + s.zeroRTTQueues = map[protocol.ConnectionID]*zeroRTTQueue{} + } + go s.run() + go s.runSendQueue() + s.logger.Debugf("Listening for %s connections on %s", conn.LocalAddr().Network(), conn.LocalAddr().String()) + return s +} + +func (s *baseServer) run() { + defer close(s.running) + for { + select { + case <-s.errorChan: + return + default: + } + select { + case <-s.errorChan: + return + case p := <-s.receivedPackets: + if bufferStillInUse := s.handlePacketImpl(p); !bufferStillInUse { + p.buffer.Release() + } + } + } +} + +func (s *baseServer) runSendQueue() { + for { + select { + case <-s.running: + return + case p := <-s.versionNegotiationQueue: + s.maybeSendVersionNegotiationPacket(p) + case p := <-s.invalidTokenQueue: + s.maybeSendInvalidToken(p) + case p := <-s.connectionRefusedQueue: + s.sendConnectionRefused(p) + case p := <-s.retryQueue: + s.sendRetry(p) + } + } +} + +// Accept returns connections that already completed the handshake. +// It is only valid if acceptEarlyConns is false. 
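+// (For early connections, EarlyListener.Accept calls the unexported accept directly.)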
+func (s *baseServer) Accept(ctx context.Context) (Connection, error) { + return s.accept(ctx) +} + +func (s *baseServer) accept(ctx context.Context) (quicConn, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case conn := <-s.connQueue: + return conn, nil + case <-s.errorChan: + return nil, s.closeErr + } +} + +func (s *baseServer) Close() error { + s.close(ErrServerClosed, true) + return nil +} + +func (s *baseServer) close(e error, notifyOnClose bool) { + s.closeMx.Lock() + if s.closeErr != nil { + s.closeMx.Unlock() + return + } + s.closeErr = e + close(s.errorChan) + <-s.running + s.closeMx.Unlock() + + if notifyOnClose { + s.onClose() + } +} + +// Addr returns the server's network address +func (s *baseServer) Addr() net.Addr { + return s.conn.LocalAddr() +} + +func (s *baseServer) handlePacket(p receivedPacket) { + select { + case s.receivedPackets <- p: + default: + s.logger.Debugf("Dropping packet from %s (%d bytes). Server receive queue full.", p.remoteAddr, p.Size()) + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropDOSPrevention) + } + } +} + +func (s *baseServer) handlePacketImpl(p receivedPacket) bool /* is the buffer still in use? */ { + if !s.nextZeroRTTCleanup.IsZero() && p.rcvTime.After(s.nextZeroRTTCleanup) { + defer s.cleanupZeroRTTQueues(p.rcvTime) + } + + if wire.IsVersionNegotiationPacket(p.data) { + s.logger.Debugf("Dropping Version Negotiation packet.") + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + // Short header packets should never end up here in the first place + if !wire.IsLongHeaderPacket(p.data[0]) { + panic(fmt.Sprintf("misrouted packet: %#v", p.data)) + } + v, err := wire.ParseVersion(p.data) + // drop the packet if we failed to parse the protocol version + if err != nil { + s.logger.Debugf("Dropping a packet with an unknown version") + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + // send a Version Negotiation Packet if the client is speaking a different protocol version + if !protocol.IsSupportedVersion(s.config.Versions, v) { + if s.disableVersionNegotiation { + return false + } + + if p.Size() < protocol.MinUnknownVersionPacketSize { + s.logger.Debugf("Dropping a packet with an unsupported version number %d that is too small (%d bytes)", v, p.Size()) + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + return s.enqueueVersionNegotiationPacket(p) + } + + if wire.Is0RTTPacket(p.data) { + if !s.acceptEarlyConns { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketType0RTT, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + return s.handle0RTTPacket(p) + } + + // If we're creating a new connection, the packet will be passed to the connection. + // The header will then be parsed again. 
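+ // Only the header is needed at this point; the other return values are discarded.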
+ hdr, _, _, err := wire.ParsePacket(p.data) + if err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError) + } + s.logger.Debugf("Error parsing packet: %s", err) + return false + } + if hdr.Type == protocol.PacketTypeInitial && p.Size() < protocol.MinInitialPacketSize { + s.logger.Debugf("Dropping a packet that is too small to be a valid Initial (%d bytes)", p.Size()) + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + + if hdr.Type != protocol.PacketTypeInitial { + // Drop long header packets. + // There's little point in sending a Stateless Reset, since the client + // might not have received the token yet. + s.logger.Debugf("Dropping long header packet of type %s (%d bytes)", hdr.Type, len(p.data)) + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropUnexpectedPacket) + } + return false + } + + s.logger.Debugf("<- Received Initial packet.") + + if err := s.handleInitialImpl(p, hdr); err != nil { + s.logger.Errorf("Error occurred handling initial packet: %s", err) + } + // Don't put the packet buffer back. + // handleInitialImpl deals with the buffer. + return true +} + +func (s *baseServer) handle0RTTPacket(p receivedPacket) bool { + connID, err := wire.ParseConnectionID(p.data, 0) + if err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketType0RTT, p.Size(), logging.PacketDropHeaderParseError) + } + return false + } + + // check again if we might have a connection now + if handler, ok := s.connHandler.Get(connID); ok { + handler.handlePacket(p) + return true + } + + if q, ok := s.zeroRTTQueues[connID]; ok { + if len(q.packets) >= protocol.Max0RTTQueueLen { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketType0RTT, p.Size(), logging.PacketDropDOSPrevention) + } + return false + } + q.packets = append(q.packets, p) + return true + } + + if len(s.zeroRTTQueues) >= protocol.Max0RTTQueues { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketType0RTT, p.Size(), logging.PacketDropDOSPrevention) + } + return false + } + queue := &zeroRTTQueue{packets: make([]receivedPacket, 1, 8)} + queue.packets[0] = p + expiration := p.rcvTime.Add(protocol.Max0RTTQueueingDuration) + queue.expiration = expiration + if s.nextZeroRTTCleanup.IsZero() || s.nextZeroRTTCleanup.After(expiration) { + s.nextZeroRTTCleanup = expiration + } + s.zeroRTTQueues[connID] = queue + return true +} + +func (s *baseServer) cleanupZeroRTTQueues(now time.Time) { + // Iterate over all queues to find those that are expired. + // This is ok since we're placing a pretty low limit on the number of queues. 
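+ // Since the number of queues is capped (protocol.Max0RTTQueues), a full scan stays cheap.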
+ var nextCleanup time.Time
+ for connID, q := range s.zeroRTTQueues {
+ if q.expiration.After(now) {
+ if nextCleanup.IsZero() || nextCleanup.After(q.expiration) {
+ nextCleanup = q.expiration
+ }
+ continue
+ }
+ for _, p := range q.packets {
+ if s.tracer != nil && s.tracer.DroppedPacket != nil {
+ s.tracer.DroppedPacket(p.remoteAddr, logging.PacketType0RTT, p.Size(), logging.PacketDropDOSPrevention)
+ }
+ p.buffer.Release()
+ }
+ delete(s.zeroRTTQueues, connID)
+ if s.logger.Debug() {
+ s.logger.Debugf("Removing 0-RTT queue for %s.", connID)
+ }
+ }
+ s.nextZeroRTTCleanup = nextCleanup
+}
+
+// validateToken returns false if:
+// - address is invalid
+// - token is expired
+// - token is nil
+func (s *baseServer) validateToken(token *handshake.Token, addr net.Addr) bool {
+ if token == nil {
+ return false
+ }
+ if !token.ValidateRemoteAddr(addr) {
+ return false
+ }
+ if !token.IsRetryToken && time.Since(token.SentTime) > s.maxTokenAge {
+ return false
+ }
+ if token.IsRetryToken && time.Since(token.SentTime) > s.config.maxRetryTokenAge() {
+ return false
+ }
+ return true
+}
+
+func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error {
+ if len(hdr.Token) == 0 && hdr.DestConnectionID.Len() < protocol.MinConnectionIDLenInitial {
+ if s.tracer != nil && s.tracer.DroppedPacket != nil {
+ s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropUnexpectedPacket)
+ }
+ p.buffer.Release()
+ return errors.New("too short connection ID")
+ }
+
+ // The server queues packets for a while, and we might already have established a connection by now.
+ // This results in a second check in the connection map.
+ // That's ok since it's not the hot path (it's only taken by some Initial and 0-RTT packets).
+ if handler, ok := s.connHandler.Get(hdr.DestConnectionID); ok {
+ handler.handlePacket(p)
+ return nil
+ }
+
+ var (
+ token *handshake.Token
+ retrySrcConnID *protocol.ConnectionID
+ clientAddrVerified bool
+ )
+ origDestConnID := hdr.DestConnectionID
+ if len(hdr.Token) > 0 {
+ tok, err := s.tokenGenerator.DecodeToken(hdr.Token)
+ if err == nil {
+ if tok.IsRetryToken {
+ origDestConnID = tok.OriginalDestConnectionID
+ retrySrcConnID = &tok.RetrySrcConnectionID
+ }
+ token = tok
+ }
+ }
+ if token != nil {
+ clientAddrVerified = s.validateToken(token, p.remoteAddr)
+ if !clientAddrVerified {
+ // For invalid and expired non-retry tokens, we don't send an INVALID_TOKEN error.
+ // We just ignore them, and act as if there was no token on this packet at all.
+ // This also means we might send a Retry later.
+ if !token.IsRetryToken {
+ token = nil
+ } else {
+ // For Retry tokens, we send an INVALID_TOKEN error if
+ // * the token is too old, or
+ // * the token is otherwise invalid.
+ select {
+ case s.invalidTokenQueue <- rejectedPacket{receivedPacket: p, hdr: hdr}:
+ default:
+ // drop packet if we can't send out the INVALID_TOKEN packets fast enough
+ p.buffer.Release()
+ }
+ return nil
+ }
+ }
+ }
+
+ if token == nil && s.verifySourceAddress != nil && s.verifySourceAddress(p.remoteAddr) {
+ // Retry invalidates all 0-RTT packets sent.
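+ // Any 0-RTT packets queued under this connection ID would no longer match after the
+ // Retry (the client switches to a new destination connection ID), so drop them right away.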
+ delete(s.zeroRTTQueues, hdr.DestConnectionID)
+ select {
+ case s.retryQueue <- rejectedPacket{receivedPacket: p, hdr: hdr}:
+ default:
+ // drop packet if we can't send out Retry packets fast enough
+ p.buffer.Release()
+ }
+ return nil
+ }
+
+ config := s.config
+ if s.config.GetConfigForClient != nil {
+ conf, err := s.config.GetConfigForClient(&ClientHelloInfo{
+ RemoteAddr: p.remoteAddr,
+ AddrVerified: clientAddrVerified,
+ })
+ if err != nil {
+ s.logger.Debugf("Rejecting new connection due to GetConfigForClient callback")
+ delete(s.zeroRTTQueues, hdr.DestConnectionID)
+ select {
+ case s.connectionRefusedQueue <- rejectedPacket{receivedPacket: p, hdr: hdr}:
+ default:
+ // drop packet if we can't send out the CONNECTION_REFUSED fast enough
+ p.buffer.Release()
+ }
+ return nil
+ }
+ config = populateConfig(conf)
+ }
+
+ var conn quicConn
+ var cancel context.CancelCauseFunc
+ ctx, cancel1 := context.WithCancelCause(context.Background())
+ if s.connContext != nil {
+ ctx = s.connContext(ctx)
+ if ctx == nil {
+ panic("quic: ConnContext returned nil")
+ }
+ // There's no guarantee that the application returns a context
+ // that's derived from the context we passed into ConnContext.
+ // We need to make sure that both contexts are cancelled.
+ var cancel2 context.CancelCauseFunc
+ ctx, cancel2 = context.WithCancelCause(ctx)
+ cancel = func(cause error) {
+ cancel1(cause)
+ cancel2(cause)
+ }
+ } else {
+ cancel = cancel1
+ }
+ ctx = context.WithValue(ctx, ConnectionTracingKey, nextConnTracingID())
+ var tracer *logging.ConnectionTracer
+ if config.Tracer != nil {
+ // Use the same connection ID that is passed to the client's GetLogWriter callback.
+ connID := hdr.DestConnectionID
+ if origDestConnID.Len() > 0 {
+ connID = origDestConnID
+ }
+ tracer = config.Tracer(ctx, protocol.PerspectiveServer, connID)
+ }
+ connID, err := s.connIDGenerator.GenerateConnectionID()
+ if err != nil {
+ return err
+ }
+ s.logger.Debugf("Changing connection ID to %s.", connID)
+ conn = s.newConn(
+ ctx,
+ cancel,
+ newSendConn(s.conn, p.remoteAddr, p.info, s.logger),
+ s.connHandler,
+ origDestConnID,
+ retrySrcConnID,
+ hdr.DestConnectionID,
+ hdr.SrcConnectionID,
+ connID,
+ s.connIDGenerator,
+ s.connHandler.GetStatelessResetToken(connID),
+ config,
+ s.tlsConf,
+ s.tokenGenerator,
+ clientAddrVerified,
+ tracer,
+ s.logger,
+ hdr.Version,
+ )
+ conn.handlePacket(p)
+ // Adding the connection will fail if the client's chosen Destination Connection ID is already in use.
+ // This is very unlikely: Even if an attacker chooses a connection ID that's already in use,
+ // under normal circumstances the packet would just be routed to that connection.
+ // The only time this collision will occur is if we receive the two Initial packets at the same time.
+ if added := s.connHandler.AddWithConnID(hdr.DestConnectionID, connID, conn); !added {
+ delete(s.zeroRTTQueues, hdr.DestConnectionID)
+ conn.closeWithTransportError(qerr.ConnectionRefused)
+ return nil
+ }
+ // Pass queued 0-RTT to the newly established connection.
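+ // These packets were buffered by handle0RTTPacket before a connection existed for this connection ID.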
+ if q, ok := s.zeroRTTQueues[hdr.DestConnectionID]; ok {
+ for _, p := range q.packets {
+ conn.handlePacket(p)
+ }
+ delete(s.zeroRTTQueues, hdr.DestConnectionID)
+ }
+
+ go conn.run()
+ go func() {
+ if completed := s.handleNewConn(conn); !completed {
+ return
+ }
+
+ select {
+ case s.connQueue <- conn:
+ default:
+ conn.closeWithTransportError(ConnectionRefused)
+ }
+ }()
+ return nil
+}
+
+func (s *baseServer) handleNewConn(conn quicConn) bool {
+ if s.acceptEarlyConns {
+ // wait until the early connection is ready, the handshake fails, or the server is closed
+ select {
+ case <-s.errorChan:
+ conn.closeWithTransportError(ConnectionRefused)
+ return false
+ case <-conn.Context().Done():
+ return false
+ case <-conn.earlyConnReady():
+ return true
+ }
+ }
+ // wait until the handshake completes, fails, or the server is closed
+ select {
+ case <-s.errorChan:
+ conn.closeWithTransportError(ConnectionRefused)
+ return false
+ case <-conn.Context().Done():
+ return false
+ case <-conn.HandshakeComplete():
+ return true
+ }
+}
+
+func (s *baseServer) sendRetry(p rejectedPacket) {
+ if err := s.sendRetryPacket(p); err != nil {
+ s.logger.Debugf("Error sending Retry packet: %s", err)
+ }
+}
+
+func (s *baseServer) sendRetryPacket(p rejectedPacket) error {
+ hdr := p.hdr
+ // Log the Initial packet now.
+ // If no Retry is sent, the packet will be logged by the connection.
+ (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger)
+ srcConnID, err := s.connIDGenerator.GenerateConnectionID()
+ if err != nil {
+ return err
+ }
+ token, err := s.tokenGenerator.NewRetryToken(p.remoteAddr, hdr.DestConnectionID, srcConnID)
+ if err != nil {
+ return err
+ }
+ replyHdr := &wire.ExtendedHeader{}
+ replyHdr.Type = protocol.PacketTypeRetry
+ replyHdr.Version = hdr.Version
+ replyHdr.SrcConnectionID = srcConnID
+ replyHdr.DestConnectionID = hdr.SrcConnectionID
+ replyHdr.Token = token
+ if s.logger.Debug() {
+ s.logger.Debugf("Changing connection ID to %s.", srcConnID)
+ s.logger.Debugf("-> Sending Retry")
+ replyHdr.Log(s.logger)
+ }
+
+ buf := getPacketBuffer()
+ defer buf.Release()
+ buf.Data, err = replyHdr.Append(buf.Data, hdr.Version)
+ if err != nil {
+ return err
+ }
+ // append the Retry integrity tag
+ tag := handshake.GetRetryIntegrityTag(buf.Data, hdr.DestConnectionID, hdr.Version)
+ buf.Data = append(buf.Data, tag[:]...)
+ if s.tracer != nil && s.tracer.SentPacket != nil {
+ s.tracer.SentPacket(p.remoteAddr, &replyHdr.Header, protocol.ByteCount(len(buf.Data)), nil)
+ }
+ _, err = s.conn.WritePacket(buf.Data, p.remoteAddr, p.info.OOB(), 0, protocol.ECNUnsupported)
+ return err
+}
+
+func (s *baseServer) maybeSendInvalidToken(p rejectedPacket) {
+ defer p.buffer.Release()
+
+ // Only send INVALID_TOKEN if we can unprotect the packet.
+ // This makes sure that we won't send it for packets that were corrupted.
+ hdr := p.hdr
+ sealer, opener := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer, hdr.Version)
+ data := p.data[:hdr.ParsedLen()+hdr.Length]
+ extHdr, err := unpackLongHeader(opener, hdr, data)
+ if err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropHeaderParseError) + } + return + } + hdrLen := extHdr.ParsedLen() + if _, err := opener.Open(data[hdrLen:hdrLen], data[hdrLen:], extHdr.PacketNumber, data[:hdrLen]); err != nil { + if s.tracer != nil && s.tracer.DroppedPacket != nil { + s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropPayloadDecryptError) + } + return + } + if s.logger.Debug() { + s.logger.Debugf("Client sent an invalid retry token. Sending INVALID_TOKEN to %s.", p.remoteAddr) + } + if err := s.sendError(p.remoteAddr, hdr, sealer, qerr.InvalidToken, p.info); err != nil { + s.logger.Debugf("Error sending INVALID_TOKEN error: %s", err) + } +} + +func (s *baseServer) sendConnectionRefused(p rejectedPacket) { + defer p.buffer.Release() + sealer, _ := handshake.NewInitialAEAD(p.hdr.DestConnectionID, protocol.PerspectiveServer, p.hdr.Version) + if err := s.sendError(p.remoteAddr, p.hdr, sealer, qerr.ConnectionRefused, p.info); err != nil { + s.logger.Debugf("Error sending CONNECTION_REFUSED error: %s", err) + } +} + +// sendError sends the error as a response to the packet received with header hdr +func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer handshake.LongHeaderSealer, errorCode qerr.TransportErrorCode, info packetInfo) error { + b := getPacketBuffer() + defer b.Release() + + ccf := &wire.ConnectionCloseFrame{ErrorCode: uint64(errorCode)} + + replyHdr := &wire.ExtendedHeader{} + replyHdr.Type = protocol.PacketTypeInitial + replyHdr.Version = hdr.Version + replyHdr.SrcConnectionID = hdr.DestConnectionID + replyHdr.DestConnectionID = hdr.SrcConnectionID + replyHdr.PacketNumberLen = protocol.PacketNumberLen4 + replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead()) + var err error + b.Data, err = replyHdr.Append(b.Data, hdr.Version) + if err != nil { + return err + } + payloadOffset := len(b.Data) + + b.Data, err = ccf.Append(b.Data, hdr.Version) + if err != nil { + return err + } + + _ = sealer.Seal(b.Data[payloadOffset:payloadOffset], b.Data[payloadOffset:], replyHdr.PacketNumber, b.Data[:payloadOffset]) + b.Data = b.Data[0 : len(b.Data)+sealer.Overhead()] + + pnOffset := payloadOffset - int(replyHdr.PacketNumberLen) + sealer.EncryptHeader( + b.Data[pnOffset+4:pnOffset+4+16], + &b.Data[0], + b.Data[pnOffset:payloadOffset], + ) + + replyHdr.Log(s.logger) + wire.LogFrame(s.logger, ccf, true) + if s.tracer != nil && s.tracer.SentPacket != nil { + s.tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(b.Data)), []logging.Frame{ccf}) + } + _, err = s.conn.WritePacket(b.Data, remoteAddr, info.OOB(), 0, protocol.ECNUnsupported) + return err +} + +func (s *baseServer) enqueueVersionNegotiationPacket(p receivedPacket) (bufferInUse bool) { + select { + case s.versionNegotiationQueue <- p: + return true + default: + // it's fine to not send version negotiation packets when we are busy + } + return false +} + +func (s *baseServer) maybeSendVersionNegotiationPacket(p receivedPacket) { + defer p.buffer.Release() + + v, err := wire.ParseVersion(p.data) + if err != nil { + s.logger.Debugf("failed to parse version for sending version negotiation packet: %s", err) + return + } + + _, src, dest, err := wire.ParseArbitraryLenConnectionIDs(p.data) + if err != nil { // should never happen + s.logger.Debugf("Dropping a packet with an unknown 
version for which we failed to parse connection IDs")
+ if s.tracer != nil && s.tracer.DroppedPacket != nil {
+ s.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket)
+ }
+ return
+ }
+
+ s.logger.Debugf("Client offered version %s, sending Version Negotiation", v)
+
+ data := wire.ComposeVersionNegotiation(dest, src, s.config.Versions)
+ if s.tracer != nil && s.tracer.SentVersionNegotiationPacket != nil {
+ s.tracer.SentVersionNegotiationPacket(p.remoteAddr, src, dest, s.config.Versions)
+ }
+ if _, err := s.conn.WritePacket(data, p.remoteAddr, p.info.OOB(), 0, protocol.ECNUnsupported); err != nil {
+ s.logger.Debugf("Error sending Version Negotiation: %s", err)
+ }
+} diff --git a/vendor/github.com/quic-go/quic-go/stream.go b/vendor/github.com/quic-go/quic-go/stream.go new file mode 100644 index 00000000..1ed26323 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/stream.go @@ -0,0 +1,162 @@ +package quic
+
+import (
+ "context"
+ "net"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
+)
+
+type deadlineError struct{}
+
+func (deadlineError) Error() string { return "deadline exceeded" }
+func (deadlineError) Temporary() bool { return true }
+func (deadlineError) Timeout() bool { return true }
+func (deadlineError) Unwrap() error { return os.ErrDeadlineExceeded }
+
+var errDeadline net.Error = &deadlineError{}
+
+// The streamSender is notified by the stream about various events.
+type streamSender interface {
+ onHasStreamData(protocol.StreamID, sendStreamI)
+ onHasStreamControlFrame(protocol.StreamID, streamControlFrameGetter)
+ // must be called without holding the mutex that is acquired by closeForShutdown
+ onStreamCompleted(protocol.StreamID)
+}
+
+// Each of the two stream halves gets its own uniStreamSender.
+// This is necessary in order to keep track of when both halves have been completed.
+type uniStreamSender struct {
+ streamSender
+ onStreamCompletedImpl func()
+ onHasStreamControlFrameImpl func(protocol.StreamID, streamControlFrameGetter)
+}
+
+func (s *uniStreamSender) onHasStreamData(id protocol.StreamID, str sendStreamI) {
+ s.streamSender.onHasStreamData(id, str)
+}
+func (s *uniStreamSender) onStreamCompleted(protocol.StreamID) { s.onStreamCompletedImpl() }
+func (s *uniStreamSender) onHasStreamControlFrame(id protocol.StreamID, str streamControlFrameGetter) {
+ s.onHasStreamControlFrameImpl(id, str)
+}
+
+var _ streamSender = &uniStreamSender{}
+
+type streamI interface {
+ Stream
+ closeForShutdown(error)
+ // for receiving
+ handleStreamFrame(*wire.StreamFrame) error
+ handleResetStreamFrame(*wire.ResetStreamFrame) error
+ // for sending
+ hasData() bool
+ handleStopSendingFrame(*wire.StopSendingFrame)
+ popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (ackhandler.StreamFrame, bool, bool)
+ updateSendWindow(protocol.ByteCount)
+}
+
+var (
+ _ receiveStreamI = (streamI)(nil)
+ _ sendStreamI = (streamI)(nil)
+)
+
+// A Stream assembles the data from StreamFrames and provides a convenient Read interface.
+//
+// Read() and Write() may be called concurrently, but multiple calls to Read() or Write() individually must be synchronized manually.
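+// For example, the following is safe (a sketch; str is a Stream obtained from a Connection),
+// while two concurrent Read calls would require external synchronization:
+//
+//	go func() { _, _ = str.Write([]byte("ping")) }()
+//	buf := make([]byte, 1024)
+//	n, err := str.Read(buf)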
+type stream struct {
+ receiveStream
+ sendStream
+
+ completedMutex sync.Mutex
+ sender streamSender
+ receiveStreamCompleted bool
+ sendStreamCompleted bool
+}
+
+var (
+ _ Stream = &stream{}
+ _ streamControlFrameGetter = &receiveStream{}
+)
+
+// newStream creates a new Stream
+func newStream(
+ ctx context.Context,
+ streamID protocol.StreamID,
+ sender streamSender,
+ flowController flowcontrol.StreamFlowController,
+) *stream {
+ s := &stream{sender: sender}
+ senderForSendStream := &uniStreamSender{
+ streamSender: sender,
+ onStreamCompletedImpl: func() {
+ s.completedMutex.Lock()
+ s.sendStreamCompleted = true
+ s.checkIfCompleted()
+ s.completedMutex.Unlock()
+ },
+ onHasStreamControlFrameImpl: func(id protocol.StreamID, str streamControlFrameGetter) {
+ sender.onHasStreamControlFrame(streamID, s)
+ },
+ }
+ s.sendStream = *newSendStream(ctx, streamID, senderForSendStream, flowController)
+ senderForReceiveStream := &uniStreamSender{
+ streamSender: sender,
+ onStreamCompletedImpl: func() {
+ s.completedMutex.Lock()
+ s.receiveStreamCompleted = true
+ s.checkIfCompleted()
+ s.completedMutex.Unlock()
+ },
+ onHasStreamControlFrameImpl: func(id protocol.StreamID, str streamControlFrameGetter) {
+ sender.onHasStreamControlFrame(streamID, s)
+ },
+ }
+ s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController)
+ return s
+}
+
+// need to define StreamID() here, since both receiveStream and sendStream have a StreamID()
+func (s *stream) StreamID() protocol.StreamID {
+ // the result is the same for receiveStream and sendStream
+ return s.sendStream.StreamID()
+}
+
+func (s *stream) Close() error {
+ return s.sendStream.Close()
+}
+
+func (s *stream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) {
+ f, ok, _ := s.sendStream.getControlFrame()
+ if ok {
+ return f, true, true
+ }
+ return s.receiveStream.getControlFrame()
+}
+
+func (s *stream) SetDeadline(t time.Time) error {
+ _ = s.SetReadDeadline(t) // SetReadDeadline never errors
+ _ = s.SetWriteDeadline(t) // SetWriteDeadline never errors
+ return nil
+}
+
+// closeForShutdown closes a stream abruptly.
+// It makes Read and Write unblock (and return the error) immediately.
+// The peer will NOT be informed about this: the stream is closed without sending a FIN or RST.
+func (s *stream) closeForShutdown(err error) {
+ s.sendStream.closeForShutdown(err)
+ s.receiveStream.closeForShutdown(err)
+}
+
+// checkIfCompleted is called from the uniStreamSender, when one of the stream halves is completed.
+// It makes sure that the onStreamCompleted callback is only called if both receive and send side have completed.
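+// Both flags are set from the uniStreamSender callbacks installed in newStream above.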
+func (s *stream) checkIfCompleted() {
+ if s.sendStreamCompleted && s.receiveStreamCompleted {
+ s.sender.onStreamCompleted(s.StreamID())
+ }
+} diff --git a/vendor/github.com/quic-go/quic-go/streams_map.go b/vendor/github.com/quic-go/quic-go/streams_map.go new file mode 100644 index 00000000..0ce91287 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/streams_map.go @@ -0,0 +1,334 @@ +package quic
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/wire"
+)
+
+type streamError struct {
+ message string
+ nums []protocol.StreamNum
+}
+
+func (e streamError) Error() string {
+ return e.message
+}
+
+func convertStreamError(err error, stype protocol.StreamType, pers protocol.Perspective) error {
+ strError, ok := err.(streamError)
+ if !ok {
+ return err
+ }
+ ids := make([]interface{}, len(strError.nums))
+ for i, num := range strError.nums {
+ ids[i] = num.StreamID(stype, pers)
+ }
+ return fmt.Errorf(strError.Error(), ids...)
+}
+
+type streamOpenErr struct{ error }
+
+var _ net.Error = &streamOpenErr{}
+
+func (streamOpenErr) Timeout() bool { return false }
+func (e streamOpenErr) Unwrap() error { return e.error }
+
+func (e streamOpenErr) Temporary() bool {
+ // In older versions of quic-go, the stream limit error was documented to be a net.Error with Temporary() == true.
+ // The Temporary method has since been deprecated, but we keep the existing behavior.
+ return errors.Is(e, &StreamLimitReachedError{})
+}
+
+// StreamLimitReachedError is returned from Connection.OpenStream and Connection.OpenUniStream
+// when it is not possible to open a new stream because the number of open streams has reached
+// the peer's stream limit.
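+// Callers can detect this condition like so (a sketch; conn is a Connection):
+//
+//	str, err := conn.OpenStream()
+//	var limitErr *StreamLimitReachedError
+//	if errors.As(err, &limitErr) {
+//		// back off, or use OpenStreamSync to block until a stream can be opened
+//	}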
+type StreamLimitReachedError struct{}
+
+func (e StreamLimitReachedError) Error() string { return "too many open streams" }
+
+type streamsMap struct {
+ ctx context.Context // not used for cancellations, but carries the values associated with the connection
+ perspective protocol.Perspective
+
+ maxIncomingBidiStreams uint64
+ maxIncomingUniStreams uint64
+
+ sender streamSender
+ queueControlFrame func(wire.Frame)
+ newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController
+
+ mutex sync.Mutex
+ outgoingBidiStreams *outgoingStreamsMap[streamI]
+ outgoingUniStreams *outgoingStreamsMap[sendStreamI]
+ incomingBidiStreams *incomingStreamsMap[streamI]
+ incomingUniStreams *incomingStreamsMap[receiveStreamI]
+ reset bool
+}
+
+var _ streamManager = &streamsMap{}
+
+func newStreamsMap(
+ ctx context.Context,
+ sender streamSender,
+ queueControlFrame func(wire.Frame),
+ newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController,
+ maxIncomingBidiStreams uint64,
+ maxIncomingUniStreams uint64,
+ perspective protocol.Perspective,
+) *streamsMap {
+ m := &streamsMap{
+ ctx: ctx,
+ perspective: perspective,
+ queueControlFrame: queueControlFrame,
+ newFlowController: newFlowController,
+ maxIncomingBidiStreams: maxIncomingBidiStreams,
+ maxIncomingUniStreams: maxIncomingUniStreams,
+ sender: sender,
+ }
+ m.initMaps()
+ return m
+}
+
+func (m *streamsMap) initMaps() {
+ m.outgoingBidiStreams = newOutgoingStreamsMap(
+ protocol.StreamTypeBidi,
+ func(num protocol.StreamNum) streamI {
+ id := num.StreamID(protocol.StreamTypeBidi, m.perspective)
+ return newStream(m.ctx, id, m.sender, m.newFlowController(id))
+ },
+ m.queueControlFrame,
+ )
+ m.incomingBidiStreams = newIncomingStreamsMap(
+ protocol.StreamTypeBidi,
+ func(num protocol.StreamNum) streamI {
+ id := num.StreamID(protocol.StreamTypeBidi, m.perspective.Opposite())
+ return newStream(m.ctx, id, m.sender, m.newFlowController(id))
+ },
+ m.maxIncomingBidiStreams,
+ m.queueControlFrame,
+ )
+ m.outgoingUniStreams = newOutgoingStreamsMap(
+ protocol.StreamTypeUni,
+ func(num protocol.StreamNum) sendStreamI {
+ id := num.StreamID(protocol.StreamTypeUni, m.perspective)
+ return newSendStream(m.ctx, id, m.sender, m.newFlowController(id))
+ },
+ m.queueControlFrame,
+ )
+ m.incomingUniStreams = newIncomingStreamsMap(
+ protocol.StreamTypeUni,
+ func(num protocol.StreamNum) receiveStreamI {
+ id := num.StreamID(protocol.StreamTypeUni, m.perspective.Opposite())
+ return newReceiveStream(id, m.sender, m.newFlowController(id))
+ },
+ m.maxIncomingUniStreams,
+ m.queueControlFrame,
+ )
+}
+
+func (m *streamsMap) OpenStream() (Stream, error) {
+ m.mutex.Lock()
+ reset := m.reset
+ mm := m.outgoingBidiStreams
+ m.mutex.Unlock()
+ if reset {
+ return nil, Err0RTTRejected
+ }
+ str, err := mm.OpenStream()
+ return str, convertStreamError(err, protocol.StreamTypeBidi, m.perspective)
+}
+
+func (m *streamsMap) OpenStreamSync(ctx context.Context) (Stream, error) {
+ m.mutex.Lock()
+ reset := m.reset
+ mm := m.outgoingBidiStreams
+ m.mutex.Unlock()
+ if reset {
+ return nil, Err0RTTRejected
+ }
+ str, err := mm.OpenStreamSync(ctx)
+ return str, convertStreamError(err, protocol.StreamTypeBidi, m.perspective)
+}
+
+func (m *streamsMap) OpenUniStream() (SendStream, error) {
+ m.mutex.Lock()
+ reset := m.reset
+ mm := m.outgoingUniStreams
+ m.mutex.Unlock()
+ if reset {
+ return nil, Err0RTTRejected
+ }
+ str, err := mm.OpenStream()
+ return str, convertStreamError(err, protocol.StreamTypeUni, m.perspective)
+}
+
+func (m
*streamsMap) OpenUniStreamSync(ctx context.Context) (SendStream, error) { + m.mutex.Lock() + reset := m.reset + mm := m.outgoingUniStreams + m.mutex.Unlock() + if reset { + return nil, Err0RTTRejected + } + str, err := mm.OpenStreamSync(ctx) + return str, convertStreamError(err, protocol.StreamTypeUni, m.perspective) +} + +func (m *streamsMap) AcceptStream(ctx context.Context) (Stream, error) { + m.mutex.Lock() + reset := m.reset + mm := m.incomingBidiStreams + m.mutex.Unlock() + if reset { + return nil, Err0RTTRejected + } + str, err := mm.AcceptStream(ctx) + return str, convertStreamError(err, protocol.StreamTypeBidi, m.perspective.Opposite()) +} + +func (m *streamsMap) AcceptUniStream(ctx context.Context) (ReceiveStream, error) { + m.mutex.Lock() + reset := m.reset + mm := m.incomingUniStreams + m.mutex.Unlock() + if reset { + return nil, Err0RTTRejected + } + str, err := mm.AcceptStream(ctx) + return str, convertStreamError(err, protocol.StreamTypeUni, m.perspective.Opposite()) +} + +func (m *streamsMap) DeleteStream(id protocol.StreamID) error { + num := id.StreamNum() + switch id.Type() { + case protocol.StreamTypeUni: + if id.InitiatedBy() == m.perspective { + return convertStreamError(m.outgoingUniStreams.DeleteStream(num), protocol.StreamTypeUni, m.perspective) + } + return convertStreamError(m.incomingUniStreams.DeleteStream(num), protocol.StreamTypeUni, m.perspective.Opposite()) + case protocol.StreamTypeBidi: + if id.InitiatedBy() == m.perspective { + return convertStreamError(m.outgoingBidiStreams.DeleteStream(num), protocol.StreamTypeBidi, m.perspective) + } + return convertStreamError(m.incomingBidiStreams.DeleteStream(num), protocol.StreamTypeBidi, m.perspective.Opposite()) + } + panic("") +} + +func (m *streamsMap) GetOrOpenReceiveStream(id protocol.StreamID) (receiveStreamI, error) { + str, err := m.getOrOpenReceiveStream(id) + if err != nil { + return nil, &qerr.TransportError{ + ErrorCode: qerr.StreamStateError, + ErrorMessage: err.Error(), + } + } + return str, nil +} + +func (m *streamsMap) getOrOpenReceiveStream(id protocol.StreamID) (receiveStreamI, error) { + num := id.StreamNum() + switch id.Type() { + case protocol.StreamTypeUni: + if id.InitiatedBy() == m.perspective { + // an outgoing unidirectional stream is a send stream, not a receive stream + return nil, fmt.Errorf("peer attempted to open receive stream %d", id) + } + str, err := m.incomingUniStreams.GetOrOpenStream(num) + return str, convertStreamError(err, protocol.StreamTypeUni, m.perspective) + case protocol.StreamTypeBidi: + var str receiveStreamI + var err error + if id.InitiatedBy() == m.perspective { + str, err = m.outgoingBidiStreams.GetStream(num) + } else { + str, err = m.incomingBidiStreams.GetOrOpenStream(num) + } + return str, convertStreamError(err, protocol.StreamTypeBidi, id.InitiatedBy()) + } + panic("") +} + +func (m *streamsMap) GetOrOpenSendStream(id protocol.StreamID) (sendStreamI, error) { + str, err := m.getOrOpenSendStream(id) + if err != nil { + return nil, &qerr.TransportError{ + ErrorCode: qerr.StreamStateError, + ErrorMessage: err.Error(), + } + } + return str, nil +} + +func (m *streamsMap) getOrOpenSendStream(id protocol.StreamID) (sendStreamI, error) { + num := id.StreamNum() + switch id.Type() { + case protocol.StreamTypeUni: + if id.InitiatedBy() == m.perspective { + str, err := m.outgoingUniStreams.GetStream(num) + return str, convertStreamError(err, protocol.StreamTypeUni, m.perspective) + } + // an incoming unidirectional stream is a receive stream, not a send stream + 
return nil, fmt.Errorf("peer attempted to open send stream %d", id)
+ case protocol.StreamTypeBidi:
+ var str sendStreamI
+ var err error
+ if id.InitiatedBy() == m.perspective {
+ str, err = m.outgoingBidiStreams.GetStream(num)
+ } else {
+ str, err = m.incomingBidiStreams.GetOrOpenStream(num)
+ }
+ return str, convertStreamError(err, protocol.StreamTypeBidi, id.InitiatedBy())
+ }
+ panic("")
+}
+
+func (m *streamsMap) HandleMaxStreamsFrame(f *wire.MaxStreamsFrame) {
+ switch f.Type {
+ case protocol.StreamTypeUni:
+ m.outgoingUniStreams.SetMaxStream(f.MaxStreamNum)
+ case protocol.StreamTypeBidi:
+ m.outgoingBidiStreams.SetMaxStream(f.MaxStreamNum)
+ }
+}
+
+func (m *streamsMap) UpdateLimits(p *wire.TransportParameters) {
+ m.outgoingBidiStreams.UpdateSendWindow(p.InitialMaxStreamDataBidiRemote)
+ m.outgoingBidiStreams.SetMaxStream(p.MaxBidiStreamNum)
+ m.outgoingUniStreams.UpdateSendWindow(p.InitialMaxStreamDataUni)
+ m.outgoingUniStreams.SetMaxStream(p.MaxUniStreamNum)
+}
+
+func (m *streamsMap) CloseWithError(err error) {
+ m.outgoingBidiStreams.CloseWithError(err)
+ m.outgoingUniStreams.CloseWithError(err)
+ m.incomingBidiStreams.CloseWithError(err)
+ m.incomingUniStreams.CloseWithError(err)
+}
+
+// ResetFor0RTT is used when 0-RTT is rejected. In that case, the streams maps are
+// 1. closed with an Err0RTTRejected, making calls to Open{Uni}Stream{Sync} / Accept{Uni}Stream return that error.
+// 2. reset to their initial state, such that we can immediately process new incoming stream data.
+// Afterwards, calls to Open{Uni}Stream{Sync} / Accept{Uni}Stream will continue to return the error,
+// until UseResetMaps() has been called.
+func (m *streamsMap) ResetFor0RTT() {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ m.reset = true
+ m.CloseWithError(Err0RTTRejected)
+ m.initMaps()
+}
+
+func (m *streamsMap) UseResetMaps() {
+ m.mutex.Lock()
+ m.reset = false
+ m.mutex.Unlock()
+} diff --git a/vendor/github.com/quic-go/quic-go/streams_map_incoming.go b/vendor/github.com/quic-go/quic-go/streams_map_incoming.go new file mode 100644 index 00000000..18ec6f99 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/streams_map_incoming.go @@ -0,0 +1,195 @@ +package quic
+
+import (
+ "context"
+ "sync"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
+)
+
+type incomingStream interface {
+ closeForShutdown(error)
+}
+
+// When a stream is deleted before it was accepted, we can't delete it from the map immediately.
+// We need to wait until the application accepts it, and delete it then.
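+// The shouldDelete flag on the entry below records this intermediate state.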
+type incomingStreamEntry[T incomingStream] struct {
+ stream T
+ shouldDelete bool
+}
+
+type incomingStreamsMap[T incomingStream] struct {
+ mutex sync.RWMutex
+ newStreamChan chan struct{}
+
+ streamType protocol.StreamType
+ streams map[protocol.StreamNum]incomingStreamEntry[T]
+
+ nextStreamToAccept protocol.StreamNum // the next stream that will be returned by AcceptStream()
+ nextStreamToOpen protocol.StreamNum // the highest stream that the peer opened
+ maxStream protocol.StreamNum // the highest stream that the peer is allowed to open
+ maxNumStreams uint64 // maximum number of streams
+
+ newStream func(protocol.StreamNum) T
+ queueMaxStreamID func(*wire.MaxStreamsFrame)
+
+ closeErr error
+}
+
+func newIncomingStreamsMap[T incomingStream](
+ streamType protocol.StreamType,
+ newStream func(protocol.StreamNum) T,
+ maxStreams uint64,
+ queueControlFrame func(wire.Frame),
+) *incomingStreamsMap[T] {
+ return &incomingStreamsMap[T]{
+ newStreamChan: make(chan struct{}, 1),
+ streamType: streamType,
+ streams: make(map[protocol.StreamNum]incomingStreamEntry[T]),
+ maxStream: protocol.StreamNum(maxStreams),
+ maxNumStreams: maxStreams,
+ newStream: newStream,
+ nextStreamToOpen: 1,
+ nextStreamToAccept: 1,
+ queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) },
+ }
+}
+
+func (m *incomingStreamsMap[T]) AcceptStream(ctx context.Context) (T, error) {
+ // drain the newStreamChan, so we don't check the map twice if the stream doesn't exist
+ select {
+ case <-m.newStreamChan:
+ default:
+ }
+
+ m.mutex.Lock()
+
+ var num protocol.StreamNum
+ var entry incomingStreamEntry[T]
+ for {
+ num = m.nextStreamToAccept
+ if m.closeErr != nil {
+ m.mutex.Unlock()
+ return *new(T), m.closeErr
+ }
+ var ok bool
+ entry, ok = m.streams[num]
+ if ok {
+ break
+ }
+ m.mutex.Unlock()
+ select {
+ case <-ctx.Done():
+ return *new(T), ctx.Err()
+ case <-m.newStreamChan:
+ }
+ m.mutex.Lock()
+ }
+ m.nextStreamToAccept++
+ // If this stream was completed before being accepted, we can delete it now.
+ if entry.shouldDelete {
+ if err := m.deleteStream(num); err != nil {
+ m.mutex.Unlock()
+ return *new(T), err
+ }
+ }
+ m.mutex.Unlock()
+ return entry.stream, nil
+}
+
+func (m *incomingStreamsMap[T]) GetOrOpenStream(num protocol.StreamNum) (T, error) {
+ m.mutex.RLock()
+ if num > m.maxStream {
+ m.mutex.RUnlock()
+ return *new(T), streamError{
+ message: "peer tried to open stream %d (current limit: %d)",
+ nums: []protocol.StreamNum{num, m.maxStream},
+ }
+ }
+ // if num is smaller than the highest stream the peer has opened, either
+ // * this stream exists in the map, and we can return it, or
+ // * this stream was already closed, in which case we return the zero value (a nil stream)
+ if num < m.nextStreamToOpen {
+ var s T
+ // If the stream was already queued for deletion, and is just waiting to be accepted, don't return it.
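+ // Callers are expected to treat a nil stream as one that was already closed.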
+ if entry, ok := m.streams[num]; ok && !entry.shouldDelete {
+ s = entry.stream
+ }
+ m.mutex.RUnlock()
+ return s, nil
+ }
+ m.mutex.RUnlock()
+
+ m.mutex.Lock()
+ // no need to check the two error conditions from above again
+ // * maxStream can only increase, so if the id was valid before, it definitely is valid now
+ // * nextStreamToOpen is only modified by this function
+ for newNum := m.nextStreamToOpen; newNum <= num; newNum++ {
+ m.streams[newNum] = incomingStreamEntry[T]{stream: m.newStream(newNum)}
+ select {
+ case m.newStreamChan <- struct{}{}:
+ default:
+ }
+ }
+ m.nextStreamToOpen = num + 1
+ entry := m.streams[num]
+ m.mutex.Unlock()
+ return entry.stream, nil
+}
+
+func (m *incomingStreamsMap[T]) DeleteStream(num protocol.StreamNum) error {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ return m.deleteStream(num)
+}
+
+func (m *incomingStreamsMap[T]) deleteStream(num protocol.StreamNum) error {
+ if _, ok := m.streams[num]; !ok {
+ return streamError{
+ message: "tried to delete unknown incoming stream %d",
+ nums: []protocol.StreamNum{num},
+ }
+ }
+
+ // Don't delete this stream yet, if it was not yet accepted.
+ // Just mark it with shouldDelete, to make sure it is deleted as soon as it gets accepted.
+ if num >= m.nextStreamToAccept {
+ entry, ok := m.streams[num]
+ if ok && entry.shouldDelete {
+ return streamError{
+ message: "tried to delete incoming stream %d multiple times",
+ nums: []protocol.StreamNum{num},
+ }
+ }
+ entry.shouldDelete = true
+ m.streams[num] = entry // can't assign to struct in map, so we need to reassign
+ return nil
+ }
+
+ delete(m.streams, num)
+ // queue a MAX_STREAMS frame, giving the peer the option to open a new stream
+ if m.maxNumStreams > uint64(len(m.streams)) {
+ maxStream := m.nextStreamToOpen + protocol.StreamNum(m.maxNumStreams-uint64(len(m.streams))) - 1
+ // Never send a value larger than protocol.MaxStreamCount.
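+ // (The new limit is the number of streams the peer has opened so far, nextStreamToOpen-1,
+ // plus the number of currently free slots, maxNumStreams-len(streams).)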
+ if maxStream <= protocol.MaxStreamCount { + m.maxStream = maxStream + m.queueMaxStreamID(&wire.MaxStreamsFrame{ + Type: m.streamType, + MaxStreamNum: m.maxStream, + }) + } + } + return nil +} + +func (m *incomingStreamsMap[T]) CloseWithError(err error) { + m.mutex.Lock() + m.closeErr = err + for _, entry := range m.streams { + entry.stream.closeForShutdown(err) + } + m.mutex.Unlock() + close(m.newStreamChan) +} diff --git a/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go new file mode 100644 index 00000000..a8d04b04 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go @@ -0,0 +1,230 @@ +package quic + +import ( + "context" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" +) + +type outgoingStream interface { + updateSendWindow(protocol.ByteCount) + closeForShutdown(error) +} + +type outgoingStreamsMap[T outgoingStream] struct { + mutex sync.RWMutex + + streamType protocol.StreamType + streams map[protocol.StreamNum]T + + openQueue map[uint64]chan struct{} + lowestInQueue uint64 + highestInQueue uint64 + + nextStream protocol.StreamNum // stream ID of the stream returned by OpenStream(Sync) + maxStream protocol.StreamNum // the maximum stream ID we're allowed to open + blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream + + newStream func(protocol.StreamNum) T + queueStreamIDBlocked func(*wire.StreamsBlockedFrame) + + closeErr error +} + +func newOutgoingStreamsMap[T outgoingStream]( + streamType protocol.StreamType, + newStream func(protocol.StreamNum) T, + queueControlFrame func(wire.Frame), +) *outgoingStreamsMap[T] { + return &outgoingStreamsMap[T]{ + streamType: streamType, + streams: make(map[protocol.StreamNum]T), + openQueue: make(map[uint64]chan struct{}), + maxStream: protocol.InvalidStreamNum, + nextStream: 1, + newStream: newStream, + queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) }, + } +} + +func (m *outgoingStreamsMap[T]) OpenStream() (T, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.closeErr != nil { + return *new(T), m.closeErr + } + + // if there are OpenStreamSync calls waiting, return an error here + if len(m.openQueue) > 0 || m.nextStream > m.maxStream { + m.maybeSendBlockedFrame() + return *new(T), streamOpenErr{&StreamLimitReachedError{}} + } + return m.openStream(), nil +} + +func (m *outgoingStreamsMap[T]) OpenStreamSync(ctx context.Context) (T, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.closeErr != nil { + return *new(T), m.closeErr + } + + if err := ctx.Err(); err != nil { + return *new(T), err + } + + if len(m.openQueue) == 0 && m.nextStream <= m.maxStream { + return m.openStream(), nil + } + + waitChan := make(chan struct{}, 1) + queuePos := m.highestInQueue + m.highestInQueue++ + if len(m.openQueue) == 0 { + m.lowestInQueue = queuePos + } + m.openQueue[queuePos] = waitChan + m.maybeSendBlockedFrame() + + for { + m.mutex.Unlock() + select { + case <-ctx.Done(): + m.mutex.Lock() + delete(m.openQueue, queuePos) + return *new(T), ctx.Err() + case <-waitChan: + } + m.mutex.Lock() + + if m.closeErr != nil { + return *new(T), m.closeErr + } + if m.nextStream > m.maxStream { + // no stream available. 
Continue waiting + continue + } + str := m.openStream() + delete(m.openQueue, queuePos) + m.lowestInQueue = queuePos + 1 + m.unblockOpenSync() + return str, nil + } +} + +func (m *outgoingStreamsMap[T]) openStream() T { + s := m.newStream(m.nextStream) + m.streams[m.nextStream] = s + m.nextStream++ + return s +} + +// maybeSendBlockedFrame queues a STREAMS_BLOCKED frame for the current stream offset, +// if we haven't sent one for this offset yet +func (m *outgoingStreamsMap[T]) maybeSendBlockedFrame() { + if m.blockedSent { + return + } + + var streamNum protocol.StreamNum + if m.maxStream != protocol.InvalidStreamNum { + streamNum = m.maxStream + } + m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ + Type: m.streamType, + StreamLimit: streamNum, + }) + m.blockedSent = true +} + +func (m *outgoingStreamsMap[T]) GetStream(num protocol.StreamNum) (T, error) { + m.mutex.RLock() + if num >= m.nextStream { + m.mutex.RUnlock() + return *new(T), streamError{ + message: "peer attempted to open stream %d", + nums: []protocol.StreamNum{num}, + } + } + s := m.streams[num] + m.mutex.RUnlock() + return s, nil +} + +func (m *outgoingStreamsMap[T]) DeleteStream(num protocol.StreamNum) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + if _, ok := m.streams[num]; !ok { + return streamError{ + message: "tried to delete unknown outgoing stream %d", + nums: []protocol.StreamNum{num}, + } + } + delete(m.streams, num) + return nil +} + +func (m *outgoingStreamsMap[T]) SetMaxStream(num protocol.StreamNum) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if num <= m.maxStream { + return + } + m.maxStream = num + m.blockedSent = false + if m.maxStream < m.nextStream-1+protocol.StreamNum(len(m.openQueue)) { + m.maybeSendBlockedFrame() + } + m.unblockOpenSync() +} + +// UpdateSendWindow is called when the peer's transport parameters are received. +// Only in the case of a 0-RTT handshake will we have open streams at this point. +// We might need to update the send window, in case the server increased it. +func (m *outgoingStreamsMap[T]) UpdateSendWindow(limit protocol.ByteCount) { + m.mutex.Lock() + for _, str := range m.streams { + str.updateSendWindow(limit) + } + m.mutex.Unlock() +} + +// unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream +func (m *outgoingStreamsMap[T]) unblockOpenSync() { + if len(m.openQueue) == 0 { + return + } + for qp := m.lowestInQueue; qp <= m.highestInQueue; qp++ { + c, ok := m.openQueue[qp] + if !ok { // entry was deleted because the context was canceled + continue + } + // unblockOpenSync is called both from OpenStreamSync and from SetMaxStream. + // It's sufficient to only unblock OpenStreamSync once. + select { + case c <- struct{}{}: + default: + } + return + } +} + +func (m *outgoingStreamsMap[T]) CloseWithError(err error) { + m.mutex.Lock() + m.closeErr = err + for _, str := range m.streams { + str.closeForShutdown(err) + } + for _, c := range m.openQueue { + if c != nil { + close(c) + } + } + m.mutex.Unlock() +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn.go b/vendor/github.com/quic-go/quic-go/sys_conn.go new file mode 100644 index 00000000..71cc4607 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn.go @@ -0,0 +1,117 @@ +package quic + +import ( + "log" + "net" + "os" + "strconv" + "strings" + "syscall" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +// OOBCapablePacketConn is a connection that allows the reading of ECN bits from the IP header. 
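+// ("OOB" refers to the out-of-band control-message data that ReadMsgUDP and WriteMsgUDP carry alongside the payload.)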
+// If the PacketConn passed to Dial or Listen satisfies this interface, quic-go will use it. +// In this case, ReadMsgUDP() will be used instead of ReadFrom() to read packets. +type OOBCapablePacketConn interface { + net.PacketConn + SyscallConn() (syscall.RawConn, error) + SetReadBuffer(int) error + ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *net.UDPAddr, err error) + WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (n, oobn int, err error) +} + +var _ OOBCapablePacketConn = &net.UDPConn{} + +func wrapConn(pc net.PacketConn) (rawConn, error) { + if err := setReceiveBuffer(pc); err != nil { + if !strings.Contains(err.Error(), "use of closed network connection") { + setBufferWarningOnce.Do(func() { + if disable, _ := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING")); disable { + return + } + log.Printf("%s. See https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes for details.", err) + }) + } + } + if err := setSendBuffer(pc); err != nil { + if !strings.Contains(err.Error(), "use of closed network connection") { + setBufferWarningOnce.Do(func() { + if disable, _ := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING")); disable { + return + } + log.Printf("%s. See https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes for details.", err) + }) + } + } + + conn, ok := pc.(interface { + SyscallConn() (syscall.RawConn, error) + }) + var supportsDF bool + if ok { + rawConn, err := conn.SyscallConn() + if err != nil { + return nil, err + } + + if _, ok := pc.LocalAddr().(*net.UDPAddr); ok { + // Only set DF on sockets that we expect to be able to handle that configuration. + var err error + supportsDF, err = setDF(rawConn) + if err != nil { + return nil, err + } + } + } + c, ok := pc.(OOBCapablePacketConn) + if !ok { + utils.DefaultLogger.Infof("PacketConn is not a net.UDPConn. Disabling optimizations possible on UDP connections.") + return &basicConn{PacketConn: pc, supportsDF: supportsDF}, nil + } + return newConn(c, supportsDF) +} + +// The basicConn is the most trivial implementation of a rawConn. +// It reads a single packet from the underlying net.PacketConn. +// It is used when +// * the net.PacketConn is not a OOBCapablePacketConn, and +// * when the OS doesn't support OOB. 
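+// Note that a basicConn can neither set ECN bits nor use GSO; WritePacket below panics if asked to do either.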
+type basicConn struct { + net.PacketConn + supportsDF bool +} + +var _ rawConn = &basicConn{} + +func (c *basicConn) ReadPacket() (receivedPacket, error) { + buffer := getPacketBuffer() + // The packet size should not exceed protocol.MaxPacketBufferSize bytes + // If it does, we only read a truncated packet, which will then end up undecryptable + buffer.Data = buffer.Data[:protocol.MaxPacketBufferSize] + n, addr, err := c.PacketConn.ReadFrom(buffer.Data) + if err != nil { + return receivedPacket{}, err + } + return receivedPacket{ + remoteAddr: addr, + rcvTime: time.Now(), + data: buffer.Data[:n], + buffer: buffer, + }, nil +} + +func (c *basicConn) WritePacket(b []byte, addr net.Addr, _ []byte, gsoSize uint16, ecn protocol.ECN) (n int, err error) { + if gsoSize != 0 { + panic("cannot use GSO with a basicConn") + } + if ecn != protocol.ECNUnsupported { + panic("cannot use ECN with a basicConn") + } + return c.PacketConn.WriteTo(b, addr) +} + +func (c *basicConn) capabilities() connCapabilities { return connCapabilities{DF: c.supportsDF} } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_buffers.go b/vendor/github.com/quic-go/quic-go/sys_conn_buffers.go new file mode 100644 index 00000000..8fe49162 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_buffers.go @@ -0,0 +1,68 @@ +package quic + +import ( + "errors" + "fmt" + "net" + "syscall" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +//go:generate sh -c "echo '// Code generated by go generate. DO NOT EDIT.\n// Source: sys_conn_buffers.go\n' > sys_conn_buffers_write.go && sed -e 's/SetReadBuffer/SetWriteBuffer/g' -e 's/setReceiveBuffer/setSendBuffer/g' -e 's/inspectReadBuffer/inspectWriteBuffer/g' -e 's/protocol\\.DesiredReceiveBufferSize/protocol\\.DesiredSendBufferSize/g' -e 's/forceSetReceiveBuffer/forceSetSendBuffer/g' -e 's/receive buffer/send buffer/g' sys_conn_buffers.go | sed '/^\\/\\/go:generate/d' >> sys_conn_buffers_write.go" +func setReceiveBuffer(c net.PacketConn) error { + conn, ok := c.(interface{ SetReadBuffer(int) error }) + if !ok { + return errors.New("connection doesn't allow setting of receive buffer size. Not a *net.UDPConn?") + } + + var syscallConn syscall.RawConn + if sc, ok := c.(interface { + SyscallConn() (syscall.RawConn, error) + }); ok { + var err error + syscallConn, err = sc.SyscallConn() + if err != nil { + syscallConn = nil + } + } + // The connection has a SetReadBuffer method, but we couldn't obtain a syscall.RawConn. + // This shouldn't happen for a net.UDPConn, but is possible if the connection just implements the + // net.PacketConn interface and the SetReadBuffer method. + // We have no way of checking if increasing the buffer size actually worked. + if syscallConn == nil { + return conn.SetReadBuffer(protocol.DesiredReceiveBufferSize) + } + + size, err := inspectReadBuffer(syscallConn) + if err != nil { + return fmt.Errorf("failed to determine receive buffer size: %w", err) + } + if size >= protocol.DesiredReceiveBufferSize { + utils.DefaultLogger.Debugf("Conn has receive buffer of %d kiB (wanted: at least %d kiB)", size/1024, protocol.DesiredReceiveBufferSize/1024) + return nil + } + // Ignore the error. We check if we succeeded by querying the buffer size afterward. 
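+ // (On Linux, unprivileged processes are clamped to the net.core.rmem_max sysctl here, which is why the force variant below exists.)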
+ _ = conn.SetReadBuffer(protocol.DesiredReceiveBufferSize) + newSize, err := inspectReadBuffer(syscallConn) + if newSize < protocol.DesiredReceiveBufferSize { + // Try again with RCVBUFFORCE on Linux + _ = forceSetReceiveBuffer(syscallConn, protocol.DesiredReceiveBufferSize) + newSize, err = inspectReadBuffer(syscallConn) + if err != nil { + return fmt.Errorf("failed to determine receive buffer size: %w", err) + } + } + if err != nil { + return fmt.Errorf("failed to determine receive buffer size: %w", err) + } + if newSize == size { + return fmt.Errorf("failed to increase receive buffer size (wanted: %d kiB, got %d kiB)", protocol.DesiredReceiveBufferSize/1024, newSize/1024) + } + if newSize < protocol.DesiredReceiveBufferSize { + return fmt.Errorf("failed to sufficiently increase receive buffer size (was: %d kiB, wanted: %d kiB, got: %d kiB)", size/1024, protocol.DesiredReceiveBufferSize/1024, newSize/1024) + } + utils.DefaultLogger.Debugf("Increased receive buffer size to %d kiB", newSize/1024) + return nil +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_buffers_write.go b/vendor/github.com/quic-go/quic-go/sys_conn_buffers_write.go new file mode 100644 index 00000000..c01a931b --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_buffers_write.go @@ -0,0 +1,70 @@ +// Code generated by go generate. DO NOT EDIT. +// Source: sys_conn_buffers.go + +package quic + +import ( + "errors" + "fmt" + "net" + "syscall" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +func setSendBuffer(c net.PacketConn) error { + conn, ok := c.(interface{ SetWriteBuffer(int) error }) + if !ok { + return errors.New("connection doesn't allow setting of send buffer size. Not a *net.UDPConn?") + } + + var syscallConn syscall.RawConn + if sc, ok := c.(interface { + SyscallConn() (syscall.RawConn, error) + }); ok { + var err error + syscallConn, err = sc.SyscallConn() + if err != nil { + syscallConn = nil + } + } + // The connection has a SetWriteBuffer method, but we couldn't obtain a syscall.RawConn. + // This shouldn't happen for a net.UDPConn, but is possible if the connection just implements the + // net.PacketConn interface and the SetWriteBuffer method. + // We have no way of checking if increasing the buffer size actually worked. + if syscallConn == nil { + return conn.SetWriteBuffer(protocol.DesiredSendBufferSize) + } + + size, err := inspectWriteBuffer(syscallConn) + if err != nil { + return fmt.Errorf("failed to determine send buffer size: %w", err) + } + if size >= protocol.DesiredSendBufferSize { + utils.DefaultLogger.Debugf("Conn has send buffer of %d kiB (wanted: at least %d kiB)", size/1024, protocol.DesiredSendBufferSize/1024) + return nil + } + // Ignore the error. We check if we succeeded by querying the buffer size afterward. 
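+ // (As for the receive buffer: Linux clamps unprivileged SO_SNDBUF requests to the net.core.wmem_max sysctl.)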
+ _ = conn.SetWriteBuffer(protocol.DesiredSendBufferSize) + newSize, err := inspectWriteBuffer(syscallConn) + if newSize < protocol.DesiredSendBufferSize { + // Try again with RCVBUFFORCE on Linux + _ = forceSetSendBuffer(syscallConn, protocol.DesiredSendBufferSize) + newSize, err = inspectWriteBuffer(syscallConn) + if err != nil { + return fmt.Errorf("failed to determine send buffer size: %w", err) + } + } + if err != nil { + return fmt.Errorf("failed to determine send buffer size: %w", err) + } + if newSize == size { + return fmt.Errorf("failed to increase send buffer size (wanted: %d kiB, got %d kiB)", protocol.DesiredSendBufferSize/1024, newSize/1024) + } + if newSize < protocol.DesiredSendBufferSize { + return fmt.Errorf("failed to sufficiently increase send buffer size (was: %d kiB, wanted: %d kiB, got: %d kiB)", size/1024, protocol.DesiredSendBufferSize/1024, newSize/1024) + } + utils.DefaultLogger.Debugf("Increased send buffer size to %d kiB", newSize/1024) + return nil +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df.go b/vendor/github.com/quic-go/quic-go/sys_conn_df.go new file mode 100644 index 00000000..0db61509 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df.go @@ -0,0 +1,22 @@ +//go:build !linux && !windows && !darwin + +package quic + +import ( + "syscall" +) + +func setDF(syscall.RawConn) (bool, error) { + // no-op on unsupported platforms + return false, nil +} + +func isSendMsgSizeErr(err error) bool { + // to be implemented for more specific platforms + return false +} + +func isRecvMsgSizeErr(err error) bool { + // to be implemented for more specific platforms + return false +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go new file mode 100644 index 00000000..b51cd8f1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go @@ -0,0 +1,74 @@ +//go:build darwin + +package quic + +import ( + "errors" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/quic-go/quic-go/internal/utils" +) + +func setDF(rawConn syscall.RawConn) (bool, error) { + // Setting DF bit is only supported from macOS11 + // https://github.com/chromium/chromium/blob/117.0.5881.2/net/socket/udp_socket_posix.cc#L555 + if supportsDF, err := isAtLeastMacOS11(); !supportsDF || err != nil { + return false, err + } + + // Enabling IP_DONTFRAG will force the kernel to return "sendto: message too long" + // and the datagram will not be fragmented + var errDFIPv4, errDFIPv6 error + if err := rawConn.Control(func(fd uintptr) { + errDFIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_DONTFRAG, 1) + errDFIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_DONTFRAG, 1) + }); err != nil { + return false, err + } + switch { + case errDFIPv4 == nil && errDFIPv6 == nil: + utils.DefaultLogger.Debugf("Setting DF for IPv4 and IPv6.") + case errDFIPv4 == nil && errDFIPv6 != nil: + utils.DefaultLogger.Debugf("Setting DF for IPv4.") + case errDFIPv4 != nil && errDFIPv6 == nil: + utils.DefaultLogger.Debugf("Setting DF for IPv6.") + // On macOS, the syscall for setting DF bit for IPv4 fails on dual-stack listeners. + // Treat the connection as not having DF enabled, even though the DF bit will be set + // when used for IPv6. + // See https://github.com/quic-go/quic-go/issues/3793 for details. 
+ return false, nil + case errDFIPv4 != nil && errDFIPv6 != nil: + return false, errors.New("setting DF failed for both IPv4 and IPv6") + } + return true, nil +} + +func isSendMsgSizeErr(err error) bool { + return errors.Is(err, unix.EMSGSIZE) +} + +func isRecvMsgSizeErr(error) bool { return false } + +func isAtLeastMacOS11() (bool, error) { + uname := &unix.Utsname{} + err := unix.Uname(uname) + if err != nil { + return false, err + } + + release := string(uname.Release[:]) + if idx := strings.Index(release, "."); idx != -1 { + version, err := strconv.Atoi(release[:idx]) + if err != nil { + return false, err + } + // Darwin version 20 is macOS version 11 + // https://en.wikipedia.org/wiki/Darwin_(operating_system)#Darwin_20_onwards + return version >= 20, nil + } + return false, nil +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go new file mode 100644 index 00000000..f09eaa5d --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go @@ -0,0 +1,42 @@ +//go:build linux + +package quic + +import ( + "errors" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/quic-go/quic-go/internal/utils" +) + +func setDF(rawConn syscall.RawConn) (bool, error) { + // Enabling IP_MTU_DISCOVER will force the kernel to return "sendto: message too long" + // and the datagram will not be fragmented + var errDFIPv4, errDFIPv6 error + if err := rawConn.Control(func(fd uintptr) { + errDFIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_MTU_DISCOVER, unix.IP_PMTUDISC_DO) + errDFIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_MTU_DISCOVER, unix.IPV6_PMTUDISC_DO) + }); err != nil { + return false, err + } + switch { + case errDFIPv4 == nil && errDFIPv6 == nil: + utils.DefaultLogger.Debugf("Setting DF for IPv4 and IPv6.") + case errDFIPv4 == nil && errDFIPv6 != nil: + utils.DefaultLogger.Debugf("Setting DF for IPv4.") + case errDFIPv4 != nil && errDFIPv6 == nil: + utils.DefaultLogger.Debugf("Setting DF for IPv6.") + case errDFIPv4 != nil && errDFIPv6 != nil: + return false, errors.New("setting DF failed for both IPv4 and IPv6") + } + return true, nil +} + +func isSendMsgSizeErr(err error) bool { + // https://man7.org/linux/man-pages/man7/udp.7.html + return errors.Is(err, unix.EMSGSIZE) +} + +func isRecvMsgSizeErr(error) bool { return false } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go new file mode 100644 index 00000000..e27635ec --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +package quic + +import ( + "errors" + "syscall" + + "golang.org/x/sys/windows" + + "github.com/quic-go/quic-go/internal/utils" +) + +const ( + // IP_DONTFRAGMENT controls the Don't Fragment (DF) bit. + // + // It's the same code point for both IPv4 and IPv6 on Windows. 
+ // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IP_DONTFRAG.html + // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IPV6_DONTFRAG.html + // + //nolint:stylecheck + IP_DONTFRAGMENT = 14 +) + +func setDF(rawConn syscall.RawConn) (bool, error) { + var errDFIPv4, errDFIPv6 error + if err := rawConn.Control(func(fd uintptr) { + errDFIPv4 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IP, IP_DONTFRAGMENT, 1) + errDFIPv6 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IPV6, IP_DONTFRAGMENT, 1) + }); err != nil { + return false, err + } + switch { + case errDFIPv4 == nil && errDFIPv6 == nil: + utils.DefaultLogger.Debugf("Setting DF for IPv4 and IPv6.") + case errDFIPv4 == nil && errDFIPv6 != nil: + utils.DefaultLogger.Debugf("Setting DF for IPv4.") + case errDFIPv4 != nil && errDFIPv6 == nil: + utils.DefaultLogger.Debugf("Setting DF for IPv6.") + case errDFIPv4 != nil && errDFIPv6 != nil: + return false, errors.New("setting DF failed for both IPv4 and IPv6") + } + return true, nil +} + +func isSendMsgSizeErr(err error) bool { + // https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2 + return errors.Is(err, windows.WSAEMSGSIZE) +} + +func isRecvMsgSizeErr(err error) bool { + // https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2 + return errors.Is(err, windows.WSAEMSGSIZE) +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go new file mode 100644 index 00000000..545502dd --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go @@ -0,0 +1,38 @@ +//go:build darwin + +package quic + +import ( + "encoding/binary" + "net/netip" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + msgTypeIPTOS = unix.IP_RECVTOS + ipv4PKTINFO = unix.IP_RECVPKTINFO +) + +const ecnIPv4DataLen = 4 + +// ReadBatch only returns a single packet on OSX, +// see https://godoc.org/golang.org/x/net/ipv4#PacketConn.ReadBatch. 
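+// A batch size of 1 therefore avoids allocating message slots that can never be filled on this platform.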
+const batchSize = 1
+
+func parseIPv4PktInfo(body []byte) (ip netip.Addr, ifIndex uint32, ok bool) {
+ // struct in_pktinfo {
+ // unsigned int ipi_ifindex; /* Interface index */
+ // struct in_addr ipi_spec_dst; /* Local address */
+ // struct in_addr ipi_addr; /* Header Destination address */
+ // };
+ if len(body) != 12 {
+ return netip.Addr{}, 0, false
+ }
+ return netip.AddrFrom4(*(*[4]byte)(body[8:12])), binary.LittleEndian.Uint32(body), true
+}
+
+func isGSOEnabled(syscall.RawConn) bool { return false }
+
+func isECNEnabled() bool { return !isECNDisabledUsingEnv() }
diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go
new file mode 100644
index 00000000..521f80d4
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go
@@ -0,0 +1,33 @@
+//go:build freebsd
+
+package quic
+
+import (
+ "net/netip"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ msgTypeIPTOS = unix.IP_RECVTOS
+ ipv4PKTINFO = 0x7
+)
+
+const ecnIPv4DataLen = 1
+
+const batchSize = 8
+
+func parseIPv4PktInfo(body []byte) (ip netip.Addr, _ uint32, ok bool) {
+ // struct in_pktinfo {
+ // struct in_addr ipi_addr; /* Header Destination address */
+ // };
+ if len(body) != 4 {
+ return netip.Addr{}, 0, false
+ }
+ return netip.AddrFrom4(*(*[4]byte)(body)), 0, true
+}
+
+func isGSOEnabled(syscall.RawConn) bool { return false }
+
+func isECNEnabled() bool { return !isECNDisabledUsingEnv() }
diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go
new file mode 100644
index 00000000..eec12719
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go
@@ -0,0 +1,156 @@
+//go:build linux
+
+package quic
+
+import (
+ "encoding/binary"
+ "errors"
+ "net/netip"
+ "os"
+ "strconv"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ msgTypeIPTOS = unix.IP_TOS
+ ipv4PKTINFO = unix.IP_PKTINFO
+)
+
+const ecnIPv4DataLen = 1
+
+const batchSize = 8 // needs to be smaller than MaxUint8 (otherwise the type of oobConn.readPos has to be changed)
+
+var kernelVersionMajor int
+
+func init() {
+ kernelVersionMajor, _ = kernelVersion()
+}
+
+func forceSetReceiveBuffer(c syscall.RawConn, bytes int) error {
+ var serr error
+ if err := c.Control(func(fd uintptr) {
+ serr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, bytes)
+ }); err != nil {
+ return err
+ }
+ return serr
+}
+
+func forceSetSendBuffer(c syscall.RawConn, bytes int) error {
+ var serr error
+ if err := c.Control(func(fd uintptr) {
+ serr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, bytes)
+ }); err != nil {
+ return err
+ }
+ return serr
+}
+
+func parseIPv4PktInfo(body []byte) (ip netip.Addr, ifIndex uint32, ok bool) {
+ // struct in_pktinfo {
+ // unsigned int ipi_ifindex; /* Interface index */
+ // struct in_addr ipi_spec_dst; /* Local address */
+ // struct in_addr ipi_addr; /* Header Destination address */
+ // };
+ if len(body) != 12 {
+ return netip.Addr{}, 0, false
+ }
+ return netip.AddrFrom4(*(*[4]byte)(body[8:12])), binary.LittleEndian.Uint32(body), true
+}
+
+// isGSOEnabled tests if the kernel supports GSO.
+// Sending with GSO might still fail later on, if the interface doesn't support it (see isGSOError).
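+// Kernels older than 5.x are ruled out up front; otherwise the check simply queries the UDP_SEGMENT socket option and reports whether the getsockopt call succeeds.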
+func isGSOEnabled(conn syscall.RawConn) bool { + if kernelVersionMajor < 5 { + return false + } + disabled, err := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_GSO")) + if err == nil && disabled { + return false + } + var serr error + if err := conn.Control(func(fd uintptr) { + _, serr = unix.GetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_SEGMENT) + }); err != nil { + return false + } + return serr == nil +} + +func appendUDPSegmentSizeMsg(b []byte, size uint16) []byte { + startLen := len(b) + const dataLen = 2 // payload is a uint16 + b = append(b, make([]byte, unix.CmsgSpace(dataLen))...) + h := (*unix.Cmsghdr)(unsafe.Pointer(&b[startLen])) + h.Level = syscall.IPPROTO_UDP + h.Type = unix.UDP_SEGMENT + h.SetLen(unix.CmsgLen(dataLen)) + + // UnixRights uses the private `data` method, but I *think* this achieves the same goal. + offset := startLen + unix.CmsgSpace(0) + *(*uint16)(unsafe.Pointer(&b[offset])) = size + return b +} + +func isGSOError(err error) bool { + var serr *os.SyscallError + if errors.As(err, &serr) { + // EIO is returned by udp_send_skb() if the device driver does not have tx checksums enabled, + // which is a hard requirement of UDP_SEGMENT. See: + // https://git.kernel.org/pub/scm/docs/man-pages/man-pages.git/tree/man7/udp.7?id=806eabd74910447f21005160e90957bde4db0183#n228 + // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/ipv4/udp.c?h=v6.2&id=c9c3395d5e3dcc6daee66c6908354d47bf98cb0c#n942 + return serr.Err == unix.EIO + } + return false +} + +// The first sendmsg call on a new UDP socket sometimes errors on Linux. +// It's not clear why this happens. +// See https://github.com/golang/go/issues/63322. +func isPermissionError(err error) bool { + var serr *os.SyscallError + if errors.As(err, &serr) { + return serr.Syscall == "sendmsg" && serr.Err == unix.EPERM + } + return false +} + +func isECNEnabled() bool { + return kernelVersionMajor >= 5 && !isECNDisabledUsingEnv() +} + +// kernelVersion returns major and minor kernel version numbers, parsed from +// the syscall.Uname's Release field, or 0, 0 if the version can't be obtained +// or parsed. +// +// copied from the standard library's internal/syscall/unix/kernel_version_linux.go +func kernelVersion() (major, minor int) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return + } + + var ( + values [2]int + value, vi int + ) + for _, c := range uname.Release { + if '0' <= c && c <= '9' { + value = (value * 10) + int(c-'0') + } else { + // Note that we're assuming N.N.N here. + // If we see anything else, we are likely to mis-parse it. 
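+ // For a release string such as "6.1.0-13-amd64" (hypothetical), this loop yields major = 6, minor = 1.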
+ values[vi] = value + vi++ + if vi >= len(values) { + break + } + value = 0 + } + } + + return values[0], values[1] +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_helper_nonlinux.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_nonlinux.go new file mode 100644 index 00000000..f8d69803 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_nonlinux.go @@ -0,0 +1,10 @@ +//go:build !linux + +package quic + +func forceSetReceiveBuffer(c any, bytes int) error { return nil } +func forceSetSendBuffer(c any, bytes int) error { return nil } + +func appendUDPSegmentSizeMsg([]byte, uint16) []byte { return nil } +func isGSOError(error) bool { return false } +func isPermissionError(err error) bool { return false } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go new file mode 100644 index 00000000..2a1f807e --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go @@ -0,0 +1,21 @@ +//go:build !darwin && !linux && !freebsd && !windows + +package quic + +import ( + "net" + "net/netip" +) + +func newConn(c net.PacketConn, supportsDF bool) (*basicConn, error) { + return &basicConn{PacketConn: c, supportsDF: supportsDF}, nil +} + +func inspectReadBuffer(any) (int, error) { return 0, nil } +func inspectWriteBuffer(any) (int, error) { return 0, nil } + +type packetInfo struct { + addr netip.Addr +} + +func (i *packetInfo) OOB() []byte { return nil } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go new file mode 100644 index 00000000..a6795ca2 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go @@ -0,0 +1,331 @@ +//go:build darwin || linux || freebsd + +package quic + +import ( + "encoding/binary" + "errors" + "log" + "net" + "net/netip" + "os" + "strconv" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "golang.org/x/sys/unix" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" +) + +const ( + ecnMask = 0x3 + oobBufferSize = 128 +) + +// Contrary to what the naming suggests, the ipv{4,6}.Message is not dependent on the IP version. +// They're both just aliases for x/net/internal/socket.Message. +// This means we can use this struct to read from a socket that receives both IPv4 and IPv6 messages. +var _ ipv4.Message = ipv6.Message{} + +type batchConn interface { + ReadBatch(ms []ipv4.Message, flags int) (int, error) +} + +func inspectReadBuffer(c syscall.RawConn) (int, error) { + var size int + var serr error + if err := c.Control(func(fd uintptr) { + size, serr = unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RCVBUF) + }); err != nil { + return 0, err + } + return size, serr +} + +func inspectWriteBuffer(c syscall.RawConn) (int, error) { + var size int + var serr error + if err := c.Control(func(fd uintptr) { + size, serr = unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_SNDBUF) + }); err != nil { + return 0, err + } + return size, serr +} + +func isECNDisabledUsingEnv() bool { + disabled, err := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_ECN")) + return err == nil && disabled +} + +type oobConn struct { + OOBCapablePacketConn + batchConn batchConn + + readPos uint8 + // Packets received from the kernel, but not yet returned by ReadPacket(). 
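+ // readPos indexes into messages; once it reaches len(messages), the next ReadPacket call performs another ReadBatch.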
+ messages []ipv4.Message + buffers [batchSize]*packetBuffer + + cap connCapabilities +} + +var _ rawConn = &oobConn{} + +func newConn(c OOBCapablePacketConn, supportsDF bool) (*oobConn, error) { + rawConn, err := c.SyscallConn() + if err != nil { + return nil, err + } + needsPacketInfo := false + if udpAddr, ok := c.LocalAddr().(*net.UDPAddr); ok && udpAddr.IP.IsUnspecified() { + needsPacketInfo = true + } + // We don't know if this a IPv4-only, IPv6-only or a IPv4-and-IPv6 connection. + // Try enabling receiving of ECN and packet info for both IP versions. + // We expect at least one of those syscalls to succeed. + var errECNIPv4, errECNIPv6, errPIIPv4, errPIIPv6 error + if err := rawConn.Control(func(fd uintptr) { + errECNIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_RECVTOS, 1) + errECNIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_RECVTCLASS, 1) + + if needsPacketInfo { + errPIIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, ipv4PKTINFO, 1) + errPIIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_RECVPKTINFO, 1) + } + }); err != nil { + return nil, err + } + switch { + case errECNIPv4 == nil && errECNIPv6 == nil: + utils.DefaultLogger.Debugf("Activating reading of ECN bits for IPv4 and IPv6.") + case errECNIPv4 == nil && errECNIPv6 != nil: + utils.DefaultLogger.Debugf("Activating reading of ECN bits for IPv4.") + case errECNIPv4 != nil && errECNIPv6 == nil: + utils.DefaultLogger.Debugf("Activating reading of ECN bits for IPv6.") + case errECNIPv4 != nil && errECNIPv6 != nil: + return nil, errors.New("activating ECN failed for both IPv4 and IPv6") + } + if needsPacketInfo { + switch { + case errPIIPv4 == nil && errPIIPv6 == nil: + utils.DefaultLogger.Debugf("Activating reading of packet info for IPv4 and IPv6.") + case errPIIPv4 == nil && errPIIPv6 != nil: + utils.DefaultLogger.Debugf("Activating reading of packet info bits for IPv4.") + case errPIIPv4 != nil && errPIIPv6 == nil: + utils.DefaultLogger.Debugf("Activating reading of packet info bits for IPv6.") + case errPIIPv4 != nil && errPIIPv6 != nil: + return nil, errors.New("activating packet info failed for both IPv4 and IPv6") + } + } + + // Allows callers to pass in a connection that already satisfies batchConn interface + // to make use of the optimisation. Otherwise, ipv4.NewPacketConn would unwrap the file descriptor + // via SyscallConn(), and read it that way, which might not be what the caller wants. + var bc batchConn + if ibc, ok := c.(batchConn); ok { + bc = ibc + } else { + bc = ipv4.NewPacketConn(c) + } + + msgs := make([]ipv4.Message, batchSize) + for i := range msgs { + // preallocate the [][]byte + msgs[i].Buffers = make([][]byte, 1) + } + oobConn := &oobConn{ + OOBCapablePacketConn: c, + batchConn: bc, + messages: msgs, + readPos: batchSize, + cap: connCapabilities{ + DF: supportsDF, + GSO: isGSOEnabled(rawConn), + ECN: isECNEnabled(), + }, + } + for i := 0; i < batchSize; i++ { + oobConn.messages[i].OOB = make([]byte, oobBufferSize) + } + return oobConn, nil +} + +var invalidCmsgOnceV4, invalidCmsgOnceV6 sync.Once + +func (c *oobConn) ReadPacket() (receivedPacket, error) { + if len(c.messages) == int(c.readPos) { // all messages read. Read the next batch of messages. 
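+ // Re-grow the slice to its full batch capacity; ReadBatch below shrinks it again to the number of packets actually received.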
+ c.messages = c.messages[:batchSize] + // replace buffers data buffers up to the packet that has been consumed during the last ReadBatch call + for i := uint8(0); i < c.readPos; i++ { + buffer := getPacketBuffer() + buffer.Data = buffer.Data[:protocol.MaxPacketBufferSize] + c.buffers[i] = buffer + c.messages[i].Buffers[0] = c.buffers[i].Data + } + c.readPos = 0 + + n, err := c.batchConn.ReadBatch(c.messages, 0) + if n == 0 || err != nil { + return receivedPacket{}, err + } + c.messages = c.messages[:n] + } + + msg := c.messages[c.readPos] + buffer := c.buffers[c.readPos] + c.readPos++ + + data := msg.OOB[:msg.NN] + p := receivedPacket{ + remoteAddr: msg.Addr, + rcvTime: time.Now(), + data: msg.Buffers[0][:msg.N], + buffer: buffer, + } + for len(data) > 0 { + hdr, body, remainder, err := unix.ParseOneSocketControlMessage(data) + if err != nil { + return receivedPacket{}, err + } + if hdr.Level == unix.IPPROTO_IP { + switch hdr.Type { + case msgTypeIPTOS: + p.ecn = protocol.ParseECNHeaderBits(body[0] & ecnMask) + case ipv4PKTINFO: + ip, ifIndex, ok := parseIPv4PktInfo(body) + if ok { + p.info.addr = ip + p.info.ifIndex = ifIndex + } else { + invalidCmsgOnceV4.Do(func() { + log.Printf("Received invalid IPv4 packet info control message: %+x. "+ + "This should never occur, please open a new issue and include details about the architecture.", body) + }) + } + } + } + if hdr.Level == unix.IPPROTO_IPV6 { + switch hdr.Type { + case unix.IPV6_TCLASS: + p.ecn = protocol.ParseECNHeaderBits(body[0] & ecnMask) + case unix.IPV6_PKTINFO: + // struct in6_pktinfo { + // struct in6_addr ipi6_addr; /* src/dst IPv6 address */ + // unsigned int ipi6_ifindex; /* send/recv interface index */ + // }; + if len(body) == 20 { + p.info.addr = netip.AddrFrom16(*(*[16]byte)(body[:16])).Unmap() + p.info.ifIndex = binary.LittleEndian.Uint32(body[16:]) + } else { + invalidCmsgOnceV6.Do(func() { + log.Printf("Received invalid IPv6 packet info control message: %+x. "+ + "This should never occur, please open a new issue and include details about the architecture.", body) + }) + } + } + } + data = remainder + } + return p, nil +} + +// WritePacket writes a new packet. 
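+// If gsoSize is non-zero, a UDP_SEGMENT control message is appended so that the kernel splits b into gsoSize-sized datagrams;
+// if ecn is not protocol.ECNUnsupported, the ECN bits are encoded into a TOS/TCLASS control message.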
+func (c *oobConn) WritePacket(b []byte, addr net.Addr, packetInfoOOB []byte, gsoSize uint16, ecn protocol.ECN) (int, error) { + oob := packetInfoOOB + if gsoSize > 0 { + if !c.capabilities().GSO { + panic("GSO disabled") + } + oob = appendUDPSegmentSizeMsg(oob, gsoSize) + } + if ecn != protocol.ECNUnsupported { + if !c.capabilities().ECN { + panic("tried to send an ECN-marked packet although ECN is disabled") + } + if remoteUDPAddr, ok := addr.(*net.UDPAddr); ok { + if remoteUDPAddr.IP.To4() != nil { + oob = appendIPv4ECNMsg(oob, ecn) + } else { + oob = appendIPv6ECNMsg(oob, ecn) + } + } + } + n, _, err := c.OOBCapablePacketConn.WriteMsgUDP(b, oob, addr.(*net.UDPAddr)) + return n, err +} + +func (c *oobConn) capabilities() connCapabilities { + return c.cap +} + +type packetInfo struct { + addr netip.Addr + ifIndex uint32 +} + +func (info *packetInfo) OOB() []byte { + if info == nil { + return nil + } + if info.addr.Is4() { + ip := info.addr.As4() + // struct in_pktinfo { + // unsigned int ipi_ifindex; /* Interface index */ + // struct in_addr ipi_spec_dst; /* Local address */ + // struct in_addr ipi_addr; /* Header Destination address */ + // }; + cm := ipv4.ControlMessage{ + Src: ip[:], + IfIndex: int(info.ifIndex), + } + return cm.Marshal() + } else if info.addr.Is6() { + ip := info.addr.As16() + // struct in6_pktinfo { + // struct in6_addr ipi6_addr; /* src/dst IPv6 address */ + // unsigned int ipi6_ifindex; /* send/recv interface index */ + // }; + cm := ipv6.ControlMessage{ + Src: ip[:], + IfIndex: int(info.ifIndex), + } + return cm.Marshal() + } + return nil +} + +func appendIPv4ECNMsg(b []byte, val protocol.ECN) []byte { + startLen := len(b) + b = append(b, make([]byte, unix.CmsgSpace(ecnIPv4DataLen))...) + h := (*unix.Cmsghdr)(unsafe.Pointer(&b[startLen])) + h.Level = syscall.IPPROTO_IP + h.Type = unix.IP_TOS + h.SetLen(unix.CmsgLen(ecnIPv4DataLen)) + + // UnixRights uses the private `data` method, but I *think* this achieves the same goal. + offset := startLen + unix.CmsgSpace(0) + b[offset] = val.ToHeaderBits() + return b +} + +func appendIPv6ECNMsg(b []byte, val protocol.ECN) []byte { + startLen := len(b) + const dataLen = 4 + b = append(b, make([]byte, unix.CmsgSpace(dataLen))...) + h := (*unix.Cmsghdr)(unsafe.Pointer(&b[startLen])) + h.Level = syscall.IPPROTO_IPV6 + h.Type = unix.IPV6_TCLASS + h.SetLen(unix.CmsgLen(dataLen)) + + // UnixRights uses the private `data` method, but I *think* this achieves the same goal. 
+ offset := startLen + unix.CmsgSpace(0) + b[offset] = val.ToHeaderBits() + return b +} diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_windows.go new file mode 100644 index 00000000..b9c1cbc8 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/sys_conn_windows.go @@ -0,0 +1,42 @@ +//go:build windows + +package quic + +import ( + "net/netip" + "syscall" + + "golang.org/x/sys/windows" +) + +func newConn(c OOBCapablePacketConn, supportsDF bool) (*basicConn, error) { + return &basicConn{PacketConn: c, supportsDF: supportsDF}, nil +} + +func inspectReadBuffer(c syscall.RawConn) (int, error) { + var size int + var serr error + if err := c.Control(func(fd uintptr) { + size, serr = windows.GetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_RCVBUF) + }); err != nil { + return 0, err + } + return size, serr +} + +func inspectWriteBuffer(c syscall.RawConn) (int, error) { + var size int + var serr error + if err := c.Control(func(fd uintptr) { + size, serr = windows.GetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_SNDBUF) + }); err != nil { + return 0, err + } + return size, serr +} + +type packetInfo struct { + addr netip.Addr +} + +func (i *packetInfo) OOB() []byte { return nil } diff --git a/vendor/github.com/quic-go/quic-go/token_store.go b/vendor/github.com/quic-go/quic-go/token_store.go new file mode 100644 index 00000000..a5c1c185 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/token_store.go @@ -0,0 +1,116 @@ +package quic + +import ( + "sync" + + list "github.com/quic-go/quic-go/internal/utils/linkedlist" +) + +type singleOriginTokenStore struct { + tokens []*ClientToken + len int + p int +} + +func newSingleOriginTokenStore(size int) *singleOriginTokenStore { + return &singleOriginTokenStore{tokens: make([]*ClientToken, size)} +} + +func (s *singleOriginTokenStore) Add(token *ClientToken) { + s.tokens[s.p] = token + s.p = s.index(s.p + 1) + s.len = min(s.len+1, len(s.tokens)) +} + +func (s *singleOriginTokenStore) Pop() *ClientToken { + s.p = s.index(s.p - 1) + token := s.tokens[s.p] + s.tokens[s.p] = nil + s.len = max(s.len-1, 0) + return token +} + +func (s *singleOriginTokenStore) Len() int { + return s.len +} + +func (s *singleOriginTokenStore) index(i int) int { + mod := len(s.tokens) + return (i + mod) % mod +} + +type lruTokenStoreEntry struct { + key string + cache *singleOriginTokenStore +} + +type lruTokenStore struct { + mutex sync.Mutex + + m map[string]*list.Element[*lruTokenStoreEntry] + q *list.List[*lruTokenStoreEntry] + capacity int + singleOriginSize int +} + +var _ TokenStore = &lruTokenStore{} + +// NewLRUTokenStore creates a new LRU cache for tokens received by the client. +// maxOrigins specifies how many origins this cache is saving tokens for. +// tokensPerOrigin specifies the maximum number of tokens per origin. 
+func NewLRUTokenStore(maxOrigins, tokensPerOrigin int) TokenStore { + return &lruTokenStore{ + m: make(map[string]*list.Element[*lruTokenStoreEntry]), + q: list.New[*lruTokenStoreEntry](), + capacity: maxOrigins, + singleOriginSize: tokensPerOrigin, + } +} + +func (s *lruTokenStore) Put(key string, token *ClientToken) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if el, ok := s.m[key]; ok { + entry := el.Value + entry.cache.Add(token) + s.q.MoveToFront(el) + return + } + + if s.q.Len() < s.capacity { + entry := &lruTokenStoreEntry{ + key: key, + cache: newSingleOriginTokenStore(s.singleOriginSize), + } + entry.cache.Add(token) + s.m[key] = s.q.PushFront(entry) + return + } + + elem := s.q.Back() + entry := elem.Value + delete(s.m, entry.key) + entry.key = key + entry.cache = newSingleOriginTokenStore(s.singleOriginSize) + entry.cache.Add(token) + s.q.MoveToFront(elem) + s.m[key] = elem +} + +func (s *lruTokenStore) Pop(key string) *ClientToken { + s.mutex.Lock() + defer s.mutex.Unlock() + + var token *ClientToken + if el, ok := s.m[key]; ok { + s.q.MoveToFront(el) + cache := el.Value.cache + token = cache.Pop() + if cache.Len() == 0 { + s.q.Remove(el) + delete(s.m, key) + } + } + return token +} diff --git a/vendor/github.com/quic-go/quic-go/tools.go b/vendor/github.com/quic-go/quic-go/tools.go new file mode 100644 index 00000000..d00ce748 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/tools.go @@ -0,0 +1,8 @@ +//go:build tools + +package quic + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" + _ "go.uber.org/mock/mockgen" +) diff --git a/vendor/github.com/quic-go/quic-go/transport.go b/vendor/github.com/quic-go/quic-go/transport.go new file mode 100644 index 00000000..059f30f5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/transport.go @@ -0,0 +1,554 @@ +package quic + +import ( + "context" + "crypto/rand" + "crypto/tls" + "errors" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +var errListenerAlreadySet = errors.New("listener already set") + +// The Transport is the central point to manage incoming and outgoing QUIC connections. +// QUIC demultiplexes connections based on their QUIC Connection IDs, not based on the 4-tuple. +// This means that a single UDP socket can be used for listening for incoming connections, as well as +// for dialing an arbitrary number of outgoing connections. +// A Transport handles a single net.PacketConn, and offers a range of configuration options +// compared to the simple helper functions like Listen and Dial that this package provides. +type Transport struct { + // A single net.PacketConn can only be handled by one Transport. + // Bad things will happen if passed to multiple Transports. + // + // A number of optimizations will be enabled if the connections implements the OOBCapablePacketConn interface, + // as a *net.UDPConn does. + // 1. It enables the Don't Fragment (DF) bit on the IP header. + // This is required to run DPLPMTUD (Path MTU Discovery, RFC 8899). + // 2. It enables reading of the ECN bits from the IP header. + // This allows the remote node to speed up its loss detection and recovery. + // 3. It uses batched syscalls (recvmmsg) to more efficiently receive packets from the socket. + // 4. It uses Generic Segmentation Offload (GSO) to efficiently send batches of packets (on Linux). 
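+ //
+ // A minimal sketch of server-side use (all names and values here are hypothetical):
+ //
+ // udpConn, _ := net.ListenUDP("udp", &net.UDPAddr{Port: 4242})
+ // tr := &quic.Transport{Conn: udpConn}
+ // ln, _ := tr.Listen(tlsConf, quicConf)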
+ // + // After passing the connection to the Transport, it's invalid to call ReadFrom or WriteTo on the connection. + Conn net.PacketConn + + // The length of the connection ID in bytes. + // It can be any value between 1 and 20. + // Due to the increased risk of collisions, it is not recommended to use connection IDs shorter than 4 bytes. + // If unset, a 4 byte connection ID will be used. + ConnectionIDLength int + + // Use for generating new connection IDs. + // This allows the application to control of the connection IDs used, + // which allows routing / load balancing based on connection IDs. + // All Connection IDs returned by the ConnectionIDGenerator MUST + // have the same length. + ConnectionIDGenerator ConnectionIDGenerator + + // The StatelessResetKey is used to generate stateless reset tokens. + // If no key is configured, sending of stateless resets is disabled. + // It is highly recommended to configure a stateless reset key, as stateless resets + // allow the peer to quickly recover from crashes and reboots of this node. + // See section 10.3 of RFC 9000 for details. + StatelessResetKey *StatelessResetKey + + // The TokenGeneratorKey is used to encrypt session resumption tokens. + // If no key is configured, a random key will be generated. + // If multiple servers are authoritative for the same domain, they should use the same key, + // see section 8.1.3 of RFC 9000 for details. + TokenGeneratorKey *TokenGeneratorKey + + // MaxTokenAge is the maximum age of the resumption token presented during the handshake. + // These tokens allow skipping address resumption when resuming a QUIC connection, + // and are especially useful when using 0-RTT. + // If not set, it defaults to 24 hours. + // See section 8.1.3 of RFC 9000 for details. + MaxTokenAge time.Duration + + // DisableVersionNegotiationPackets disables the sending of Version Negotiation packets. + // This can be useful if version information is exchanged out-of-band. + // It has no effect for clients. + DisableVersionNegotiationPackets bool + + // VerifySourceAddress decides if a connection attempt originating from unvalidated source + // addresses first needs to go through source address validation using QUIC's Retry mechanism, + // as described in RFC 9000 section 8.1.2. + // Note that the address passed to this callback is unvalidated, and might be spoofed in case + // of an attack. + // Validating the source address adds one additional network roundtrip to the handshake, + // and should therefore only be used if a suspiciously high number of incoming connection is recorded. + // For most use cases, wrapping the Allow function of a rate.Limiter will be a reasonable + // implementation of this callback (negating its return value). + VerifySourceAddress func(net.Addr) bool + + // ConnContext is called when the server accepts a new connection. + // The context is closed when the connection is closed, or when the handshake fails for any reason. + // The context returned from the callback is used to derive every other context used during the + // lifetime of the connection: + // * the context passed to crypto/tls (and used on the tls.ClientHelloInfo) + // * the context used in Config.Tracer + // * the context returned from Connection.Context + // * the context returned from SendStream.Context + // It is not used for dialed connections. + ConnContext func(context.Context) context.Context + + // A Tracer traces events that don't belong to a single QUIC connection. + // Tracer.Close is called when the transport is closed. 
+ Tracer *logging.Tracer + + handlerMap packetHandlerManager + + mutex sync.Mutex + initOnce sync.Once + initErr error + + // Set in init. + // If no ConnectionIDGenerator is set, this is the ConnectionIDLength. + connIDLen int + // Set in init. + // If no ConnectionIDGenerator is set, this is set to a default. + connIDGenerator ConnectionIDGenerator + + server *baseServer + + conn rawConn + + closeQueue chan closePacket + statelessResetQueue chan receivedPacket + + listening chan struct{} // is closed when listen returns + closed bool + createdConn bool + isSingleUse bool // was created for a single server or client, i.e. by calling quic.Listen or quic.Dial + + readingNonQUICPackets atomic.Bool + nonQUICPackets chan receivedPacket + + logger utils.Logger +} + +// Listen starts listening for incoming QUIC connections. +// There can only be a single listener on any net.PacketConn. +// Listen may only be called again after the current Listener was closed. +func (t *Transport) Listen(tlsConf *tls.Config, conf *Config) (*Listener, error) { + s, err := t.createServer(tlsConf, conf, false) + if err != nil { + return nil, err + } + return &Listener{baseServer: s}, nil +} + +// ListenEarly starts listening for incoming QUIC connections. +// There can only be a single listener on any net.PacketConn. +// Listen may only be called again after the current Listener was closed. +func (t *Transport) ListenEarly(tlsConf *tls.Config, conf *Config) (*EarlyListener, error) { + s, err := t.createServer(tlsConf, conf, true) + if err != nil { + return nil, err + } + return &EarlyListener{baseServer: s}, nil +} + +func (t *Transport) createServer(tlsConf *tls.Config, conf *Config, allow0RTT bool) (*baseServer, error) { + if tlsConf == nil { + return nil, errors.New("quic: tls.Config not set") + } + if err := validateConfig(conf); err != nil { + return nil, err + } + + t.mutex.Lock() + defer t.mutex.Unlock() + + if t.server != nil { + return nil, errListenerAlreadySet + } + conf = populateConfig(conf) + if err := t.init(false); err != nil { + return nil, err + } + s := newServer( + t.conn, + t.handlerMap, + t.connIDGenerator, + t.ConnContext, + tlsConf, + conf, + t.Tracer, + t.closeServer, + *t.TokenGeneratorKey, + t.MaxTokenAge, + t.VerifySourceAddress, + t.DisableVersionNegotiationPackets, + allow0RTT, + ) + t.server = s + return s, nil +} + +// Dial dials a new connection to a remote host (not using 0-RTT). +func (t *Transport) Dial(ctx context.Context, addr net.Addr, tlsConf *tls.Config, conf *Config) (Connection, error) { + return t.dial(ctx, addr, "", tlsConf, conf, false) +} + +// DialEarly dials a new connection, attempting to use 0-RTT if possible. 
+func (t *Transport) DialEarly(ctx context.Context, addr net.Addr, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) { + return t.dial(ctx, addr, "", tlsConf, conf, true) +} + +func (t *Transport) dial(ctx context.Context, addr net.Addr, host string, tlsConf *tls.Config, conf *Config, use0RTT bool) (EarlyConnection, error) { + if err := validateConfig(conf); err != nil { + return nil, err + } + conf = populateConfig(conf) + if err := t.init(t.isSingleUse); err != nil { + return nil, err + } + var onClose func() + if t.isSingleUse { + onClose = func() { t.Close() } + } + tlsConf = tlsConf.Clone() + setTLSConfigServerName(tlsConf, addr, host) + return dial(ctx, newSendConn(t.conn, addr, packetInfo{}, utils.DefaultLogger), t.connIDGenerator, t.handlerMap, tlsConf, conf, onClose, use0RTT) +} + +func (t *Transport) init(allowZeroLengthConnIDs bool) error { + t.initOnce.Do(func() { + var conn rawConn + if c, ok := t.Conn.(rawConn); ok { + conn = c + } else { + var err error + conn, err = wrapConn(t.Conn) + if err != nil { + t.initErr = err + return + } + } + + t.logger = utils.DefaultLogger // TODO: make this configurable + t.conn = conn + t.handlerMap = newPacketHandlerMap(t.StatelessResetKey, t.enqueueClosePacket, t.logger) + t.listening = make(chan struct{}) + + t.closeQueue = make(chan closePacket, 4) + t.statelessResetQueue = make(chan receivedPacket, 4) + if t.TokenGeneratorKey == nil { + var key TokenGeneratorKey + if _, err := rand.Read(key[:]); err != nil { + t.initErr = err + return + } + t.TokenGeneratorKey = &key + } + + if t.ConnectionIDGenerator != nil { + t.connIDGenerator = t.ConnectionIDGenerator + t.connIDLen = t.ConnectionIDGenerator.ConnectionIDLen() + } else { + connIDLen := t.ConnectionIDLength + if t.ConnectionIDLength == 0 && !allowZeroLengthConnIDs { + connIDLen = protocol.DefaultConnectionIDLength + } + t.connIDLen = connIDLen + t.connIDGenerator = &protocol.DefaultConnectionIDGenerator{ConnLen: t.connIDLen} + } + + getMultiplexer().AddConn(t.Conn) + go t.listen(conn) + go t.runSendQueue() + }) + return t.initErr +} + +// WriteTo sends a packet on the underlying connection. +func (t *Transport) WriteTo(b []byte, addr net.Addr) (int, error) { + if err := t.init(false); err != nil { + return 0, err + } + return t.conn.WritePacket(b, addr, nil, 0, protocol.ECNUnsupported) +} + +func (t *Transport) enqueueClosePacket(p closePacket) { + select { + case t.closeQueue <- p: + default: + // Oops, we're backlogged. + // Just drop the packet, sending CONNECTION_CLOSE copies is best effort anyway. + } +} + +func (t *Transport) runSendQueue() { + for { + select { + case <-t.listening: + return + case p := <-t.closeQueue: + t.conn.WritePacket(p.payload, p.addr, p.info.OOB(), 0, protocol.ECNUnsupported) + case p := <-t.statelessResetQueue: + t.sendStatelessReset(p) + } + } +} + +// Close closes the underlying connection. +// If any listener was started, it will be closed as well. +// It is invalid to start new listeners or connections after that. 
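+// Close blocks until the listen loop has returned.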
+func (t *Transport) Close() error {
+ t.close(errors.New("closing"))
+ if t.createdConn {
+ if err := t.Conn.Close(); err != nil {
+ return err
+ }
+ } else if t.conn != nil {
+ t.conn.SetReadDeadline(time.Now())
+ defer func() { t.conn.SetReadDeadline(time.Time{}) }()
+ }
+ if t.listening != nil {
+ <-t.listening // wait until listening returns
+ }
+ return nil
+}
+
+func (t *Transport) closeServer() {
+ t.mutex.Lock()
+ t.server = nil
+ if t.isSingleUse {
+ t.closed = true
+ }
+ t.mutex.Unlock()
+ if t.createdConn {
+ t.Conn.Close()
+ }
+ if t.isSingleUse {
+ t.conn.SetReadDeadline(time.Now())
+ defer func() { t.conn.SetReadDeadline(time.Time{}) }()
+ <-t.listening // wait until listening returns
+ }
+}
+
+func (t *Transport) close(e error) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ if t.closed {
+ return
+ }
+
+ if t.handlerMap != nil {
+ t.handlerMap.Close(e)
+ }
+ if t.server != nil {
+ t.server.close(e, false)
+ }
+ if t.Tracer != nil && t.Tracer.Close != nil {
+ t.Tracer.Close()
+ }
+ t.closed = true
+}
+
+// only print warnings about the UDP buffer sizes once
+var setBufferWarningOnce sync.Once
+
+func (t *Transport) listen(conn rawConn) {
+ defer close(t.listening)
+ defer getMultiplexer().RemoveConn(t.Conn)
+
+ for {
+ p, err := conn.ReadPacket()
+ //nolint:staticcheck // SA1019 ignore this!
+ // TODO: This code is used to ignore wsa errors on Windows.
+ // Since net.Error.Temporary is deprecated as of Go 1.18, we should find a better solution.
+ // See https://github.com/quic-go/quic-go/issues/1737 for details.
+ if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
+ t.mutex.Lock()
+ closed := t.closed
+ t.mutex.Unlock()
+ if closed {
+ return
+ }
+ t.logger.Debugf("Temporary error reading from conn: %s", err)
+ continue
+ }
+ if err != nil {
+ // Windows returns an error when receiving a UDP datagram that doesn't fit into the provided buffer.
+ if isRecvMsgSizeErr(err) {
+ continue
+ }
+ t.close(err)
+ return
+ }
+ t.handlePacket(p)
+ }
+}
+
+func (t *Transport) handlePacket(p receivedPacket) {
+ if len(p.data) == 0 {
+ return
+ }
+ if !wire.IsPotentialQUICPacket(p.data[0]) && !wire.IsLongHeaderPacket(p.data[0]) {
+ t.handleNonQUICPacket(p)
+ return
+ }
+ connID, err := wire.ParseConnectionID(p.data, t.connIDLen)
+ if err != nil {
+ t.logger.Debugf("error parsing connection ID on packet from %s: %s", p.remoteAddr, err)
+ if t.Tracer != nil && t.Tracer.DroppedPacket != nil {
+ t.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)
+ }
+ p.buffer.MaybeRelease()
+ return
+ }
+
+ // If there's a connection associated with the connection ID, pass the packet there.
+ if handler, ok := t.handlerMap.Get(connID); ok {
+ handler.handlePacket(p)
+ return
+ }
+ // RFC 9000 section 10.3.1 requires that the stateless reset detection logic is run for both
+ // packets that cannot be associated with any connections, and for packets that can't be decrypted.
+ // We deviate from the RFC and ignore the latter: If a packet's connection ID is associated with an
+ // existing connection, it is dropped there if it can't be decrypted.
+ // Stateless resets use random connection IDs, and at reasonable connection ID lengths collisions are
+ // exceedingly rare. In the unlikely event that a stateless reset is misrouted to an existing connection,
+ // it is to be expected that the next stateless reset will be correctly detected.
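+ // A stateless reset carries its 16-byte token in the last 16 bytes of the datagram; see maybeHandleStatelessReset below.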
+ if isStatelessReset := t.maybeHandleStatelessReset(p.data); isStatelessReset { + return + } + if !wire.IsLongHeaderPacket(p.data[0]) { + t.maybeSendStatelessReset(p) + return + } + + t.mutex.Lock() + defer t.mutex.Unlock() + if t.server == nil { // no server set + t.logger.Debugf("received a packet with an unexpected connection ID %s", connID) + return + } + t.server.handlePacket(p) +} + +func (t *Transport) maybeSendStatelessReset(p receivedPacket) { + if t.StatelessResetKey == nil { + p.buffer.Release() + return + } + + // Don't send a stateless reset in response to very small packets. + // This includes packets that could be stateless resets. + if len(p.data) <= protocol.MinStatelessResetSize { + p.buffer.Release() + return + } + + select { + case t.statelessResetQueue <- p: + default: + // it's fine to not send a stateless reset when we're busy + p.buffer.Release() + } +} + +func (t *Transport) sendStatelessReset(p receivedPacket) { + defer p.buffer.Release() + + connID, err := wire.ParseConnectionID(p.data, t.connIDLen) + if err != nil { + t.logger.Errorf("error parsing connection ID on packet from %s: %s", p.remoteAddr, err) + return + } + token := t.handlerMap.GetStatelessResetToken(connID) + t.logger.Debugf("Sending stateless reset to %s (connection ID: %s). Token: %#x", p.remoteAddr, connID, token) + data := make([]byte, protocol.MinStatelessResetSize-16, protocol.MinStatelessResetSize) + rand.Read(data) + data[0] = (data[0] & 0x7f) | 0x40 + data = append(data, token[:]...) + if _, err := t.conn.WritePacket(data, p.remoteAddr, p.info.OOB(), 0, protocol.ECNUnsupported); err != nil { + t.logger.Debugf("Error sending Stateless Reset to %s: %s", p.remoteAddr, err) + } +} + +func (t *Transport) maybeHandleStatelessReset(data []byte) bool { + // stateless resets are always short header packets + if wire.IsLongHeaderPacket(data[0]) { + return false + } + if len(data) < 17 /* type byte + 16 bytes for the reset token */ { + return false + } + + token := *(*protocol.StatelessResetToken)(data[len(data)-16:]) + if conn, ok := t.handlerMap.GetByResetToken(token); ok { + t.logger.Debugf("Received a stateless reset with token %#x. Closing connection.", token) + go conn.destroy(&StatelessResetError{Token: token}) + return true + } + return false +} + +func (t *Transport) handleNonQUICPacket(p receivedPacket) { + // Strictly speaking, this is racy, + // but we only care about receiving packets at some point after ReadNonQUICPacket has been called. + if !t.readingNonQUICPackets.Load() { + return + } + select { + case t.nonQUICPackets <- p: + default: + if t.Tracer != nil && t.Tracer.DroppedPacket != nil { + t.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropDOSPrevention) + } + } +} + +const maxQueuedNonQUICPackets = 32 + +// ReadNonQUICPacket reads non-QUIC packets received on the underlying connection. +// The detection logic is very simple: Any packet that has the first and second bit of the packet set to 0. +// Note that this is stricter than the detection logic defined in RFC 9443. 
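+// A typical use is demultiplexing another protocol off the same UDP socket.
+// An illustrative read loop (the buffer size and the handle callback are
+// hypothetical):
+//
+//	buf := make([]byte, 1500)
+//	for {
+//		n, addr, err := tr.ReadNonQUICPacket(ctx, buf)
+//		if err != nil {
+//			break
+//		}
+//		handle(buf[:n], addr)
+//	}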
+func (t *Transport) ReadNonQUICPacket(ctx context.Context, b []byte) (int, net.Addr, error) { + if err := t.init(false); err != nil { + return 0, nil, err + } + if !t.readingNonQUICPackets.Load() { + t.nonQUICPackets = make(chan receivedPacket, maxQueuedNonQUICPackets) + t.readingNonQUICPackets.Store(true) + } + select { + case <-ctx.Done(): + return 0, nil, ctx.Err() + case p := <-t.nonQUICPackets: + n := copy(b, p.data) + return n, p.remoteAddr, nil + case <-t.listening: + return 0, nil, errors.New("closed") + } +} + +func setTLSConfigServerName(tlsConf *tls.Config, addr net.Addr, host string) { + // If no ServerName is set, infer the ServerName from the host we're connecting to. + if tlsConf.ServerName != "" { + return + } + if host == "" { + if udpAddr, ok := addr.(*net.UDPAddr); ok { + tlsConf.ServerName = udpAddr.IP.String() + return + } + } + h, _, err := net.SplitHostPort(host) + if err != nil { // This happens if the host doesn't contain a port number. + tlsConf.ServerName = host + return + } + tlsConf.ServerName = h +} diff --git a/vendor/go.uber.org/mock/AUTHORS b/vendor/go.uber.org/mock/AUTHORS new file mode 100644 index 00000000..660b8ccc --- /dev/null +++ b/vendor/go.uber.org/mock/AUTHORS @@ -0,0 +1,12 @@ +# This is the official list of GoMock authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Alex Reece +Google Inc. diff --git a/vendor/go.uber.org/mock/LICENSE b/vendor/go.uber.org/mock/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/go.uber.org/mock/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.uber.org/mock/mockgen/generic_go118.go b/vendor/go.uber.org/mock/mockgen/generic_go118.go new file mode 100644 index 00000000..b7b44947 --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/generic_go118.go @@ -0,0 +1,130 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// See the License for the specific language governing permissions and +// limitations under the License. 
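What follows is mockgen's go1.18+ half of a build-tag pair (its !go1.18 counterpart appears further down): it teaches the source parser about generic instantiations. For reference, a small illustrative source file showing the two AST shapes parseGenericType handles (all type names here are hypothetical):

	package sample

	type User struct{}
	type Repository[T any] struct{}
	type Pair[K comparable, V any] struct{}

	// Repository[User] parses as an *ast.IndexExpr (a single type argument).
	type Loader interface {
		Load() Repository[User]
	}

	// Pair[string, int] parses as an *ast.IndexListExpr (multiple type arguments).
	type Mapper interface {
		Map() Pair[string, int]
	}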
+ +//go:build go1.18 +// +build go1.18 + +package main + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "strings" + + "go.uber.org/mock/mockgen/model" +) + +func getTypeSpecTypeParams(ts *ast.TypeSpec) []*ast.Field { + if ts == nil || ts.TypeParams == nil { + return nil + } + return ts.TypeParams.List +} + +func (p *fileParser) parseGenericType(pkg string, typ ast.Expr, tps map[string]model.Type) (model.Type, error) { + switch v := typ.(type) { + case *ast.IndexExpr: + m, err := p.parseType(pkg, v.X, tps) + if err != nil { + return nil, err + } + nm, ok := m.(*model.NamedType) + if !ok { + return m, nil + } + t, err := p.parseType(pkg, v.Index, tps) + if err != nil { + return nil, err + } + nm.TypeParams = &model.TypeParametersType{TypeParameters: []model.Type{t}} + return m, nil + case *ast.IndexListExpr: + m, err := p.parseType(pkg, v.X, tps) + if err != nil { + return nil, err + } + nm, ok := m.(*model.NamedType) + if !ok { + return m, nil + } + var ts []model.Type + for _, expr := range v.Indices { + t, err := p.parseType(pkg, expr, tps) + if err != nil { + return nil, err + } + ts = append(ts, t) + } + nm.TypeParams = &model.TypeParametersType{TypeParameters: ts} + return m, nil + } + return nil, nil +} + +func getIdentTypeParams(decl any) string { + if decl == nil { + return "" + } + ts, ok := decl.(*ast.TypeSpec) + if !ok { + return "" + } + if ts.TypeParams == nil || len(ts.TypeParams.List) == 0 { + return "" + } + var sb strings.Builder + sb.WriteString("[") + for i, v := range ts.TypeParams.List { + if i != 0 { + sb.WriteString(", ") + } + sb.WriteString(v.Names[0].Name) + } + sb.WriteString("]") + return sb.String() +} + +func (p *fileParser) parseGenericMethod(field *ast.Field, it *namedInterface, iface *model.Interface, pkg string, tps map[string]model.Type) ([]*model.Method, error) { + var indices []ast.Expr + var typ ast.Expr + switch v := field.Type.(type) { + case *ast.IndexExpr: + indices = []ast.Expr{v.Index} + typ = v.X + case *ast.IndexListExpr: + indices = v.Indices + typ = v.X + case *ast.UnaryExpr: + if v.Op == token.TILDE { + return nil, errConstraintInterface + } + return nil, fmt.Errorf("~T may only appear as constraint for %T", field.Type) + case *ast.BinaryExpr: + if v.Op == token.OR { + return nil, errConstraintInterface + } + return nil, fmt.Errorf("A|B may only appear as constraint for %T", field.Type) + default: + return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type) + } + + nf := &ast.Field{ + Doc: field.Comment, + Names: field.Names, + Type: typ, + Tag: field.Tag, + Comment: field.Comment, + } + + it.embeddedInstTypeParams = indices + + return p.parseMethod(nf, it, iface, pkg, tps) +} + +var errConstraintInterface = errors.New("interface contains constraints") diff --git a/vendor/go.uber.org/mock/mockgen/generic_notgo118.go b/vendor/go.uber.org/mock/mockgen/generic_notgo118.go new file mode 100644 index 00000000..8a779c8b --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/generic_notgo118.go @@ -0,0 +1,41 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +package main + +import ( + "fmt" + "go/ast" + + "go.uber.org/mock/mockgen/model" +) + +func getTypeSpecTypeParams(ts *ast.TypeSpec) []*ast.Field { + return nil +} + +func (p *fileParser) parseGenericType(pkg string, typ ast.Expr, tps map[string]model.Type) (model.Type, error) { + return nil, nil +} + +func getIdentTypeParams(decl any) string { + return "" +} + +func (p *fileParser) parseGenericMethod(field *ast.Field, it *namedInterface, iface *model.Interface, pkg string, tps map[string]model.Type) ([]*model.Method, error) { + return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type) +} diff --git a/vendor/go.uber.org/mock/mockgen/mockgen.go b/vendor/go.uber.org/mock/mockgen/mockgen.go new file mode 100644 index 00000000..df7d85f0 --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/mockgen.go @@ -0,0 +1,883 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// MockGen generates mock implementations of Go interfaces. +package main + +// TODO: This does not support recursive embedded interfaces. +// TODO: This does not support embedding package-local interfaces in a separate file. + +import ( + "bytes" + "encoding/json" + "errors" + "flag" + "fmt" + "go/token" + "io" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/mod/modfile" + toolsimports "golang.org/x/tools/imports" + + "go.uber.org/mock/mockgen/model" +) + +const ( + gomockImportPath = "go.uber.org/mock/gomock" +) + +var ( + version = "" + commit = "none" + date = "unknown" +) + +var ( + source = flag.String("source", "", "(source mode) Input Go source file; enables source mode.") + destination = flag.String("destination", "", "Output file; defaults to stdout.") + mockNames = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. Mock names default to 'Mock'+ interfaceName suffix.") + packageOut = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.") + selfPackage = flag.String("self_package", "", "The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. 
Setting this flag will then tell mockgen which import to exclude.") + writePkgComment = flag.Bool("write_package_comment", true, "Writes package documentation comment (godoc) if true.") + writeSourceComment = flag.Bool("write_source_comment", true, "Writes original file (source mode) or interface names (reflect mode) comment if true.") + writeGenerateDirective = flag.Bool("write_generate_directive", false, "Add //go:generate directive to regenerate the mock") + copyrightFile = flag.String("copyright_file", "", "Copyright file used to add copyright header") + typed = flag.Bool("typed", false, "Generate Type-safe 'Return', 'Do', 'DoAndReturn' function") + imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.") + auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.") + excludeInterfaces = flag.String("exclude_interfaces", "", "Comma-separated names of interfaces to be excluded") + + debugParser = flag.Bool("debug_parser", false, "Print out parser results only.") + showVersion = flag.Bool("version", false, "Print version.") +) + +func main() { + flag.Usage = usage + flag.Parse() + + if *showVersion { + printVersion() + return + } + + var pkg *model.Package + var err error + var packageName string + if *source != "" { + pkg, err = sourceMode(*source) + } else { + if flag.NArg() != 2 { + usage() + log.Fatal("Expected exactly two arguments") + } + packageName = flag.Arg(0) + interfaces := strings.Split(flag.Arg(1), ",") + if packageName == "." { + dir, err := os.Getwd() + if err != nil { + log.Fatalf("Get current directory failed: %v", err) + } + packageName, err = packageNameOfDir(dir) + if err != nil { + log.Fatalf("Parse package name failed: %v", err) + } + } + pkg, err = reflectMode(packageName, interfaces) + } + if err != nil { + log.Fatalf("Loading input failed: %v", err) + } + + if *debugParser { + pkg.Print(os.Stdout) + return + } + + outputPackageName := *packageOut + if outputPackageName == "" { + // pkg.Name in reflect mode is the base name of the import path, + // which might have characters that are illegal to have in package names. + outputPackageName = "mock_" + sanitize(pkg.Name) + } + + // outputPackagePath represents the fully qualified name of the package of + // the generated code. Its purposes are to prevent the module from importing + // itself and to prevent qualifying type names that come from its own + // package (i.e. if there is a type called X then we want to print "X" not + // "package.X" since "package" is this package). This can happen if the mock + // is output into an already existing package. 
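+ // As an illustrative example, generating mocks back into the package under
+ // test itself might be invoked as
+ //
+ //	mockgen -source=store.go -destination=store_mock.go -package=repo -self_package=example.com/app/repo
+ //
+ // where the file names and the module path are hypothetical.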
+ outputPackagePath := *selfPackage + if outputPackagePath == "" && *destination != "" { + dstPath, err := filepath.Abs(filepath.Dir(*destination)) + if err == nil { + pkgPath, err := parsePackageImport(dstPath) + if err == nil { + outputPackagePath = pkgPath + } else { + log.Println("Unable to infer -self_package from destination file path:", err) + } + } else { + log.Println("Unable to determine destination file path:", err) + } + } + + g := new(generator) + if *source != "" { + g.filename = *source + } else { + g.srcPackage = packageName + g.srcInterfaces = flag.Arg(1) + } + g.destination = *destination + + if *mockNames != "" { + g.mockNames = parseMockNames(*mockNames) + } + if *copyrightFile != "" { + header, err := os.ReadFile(*copyrightFile) + if err != nil { + log.Fatalf("Failed reading copyright file: %v", err) + } + + g.copyrightHeader = string(header) + } + if err := g.Generate(pkg, outputPackageName, outputPackagePath); err != nil { + log.Fatalf("Failed generating mock: %v", err) + } + output := g.Output() + dst := os.Stdout + if len(*destination) > 0 { + if err := os.MkdirAll(filepath.Dir(*destination), os.ModePerm); err != nil { + log.Fatalf("Unable to create directory: %v", err) + } + existing, err := os.ReadFile(*destination) + if err != nil && !errors.Is(err, os.ErrNotExist) { + log.Fatalf("Failed reading pre-existing destination file: %v", err) + } + if len(existing) == len(output) && bytes.Equal(existing, output) { + return + } + f, err := os.Create(*destination) + if err != nil { + log.Fatalf("Failed opening destination file: %v", err) + } + defer f.Close() + dst = f + } + if _, err := dst.Write(output); err != nil { + log.Fatalf("Failed writing to destination: %v", err) + } +} + +func parseMockNames(names string) map[string]string { + mocksMap := make(map[string]string) + for _, kv := range strings.Split(names, ",") { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 || parts[1] == "" { + log.Fatalf("bad mock names spec: %v", kv) + } + mocksMap[parts[0]] = parts[1] + } + return mocksMap +} + +func parseExcludeInterfaces(names string) map[string]struct{} { + splitNames := strings.Split(names, ",") + namesSet := make(map[string]struct{}, len(splitNames)) + for _, name := range splitNames { + if name == "" { + continue + } + + namesSet[name] = struct{}{} + } + + if len(namesSet) == 0 { + return nil + } + + return namesSet +} + +func usage() { + _, _ = io.WriteString(os.Stderr, usageText) + flag.PrintDefaults() +} + +const usageText = `mockgen has two modes of operation: source and reflect. + +Source mode generates mock interfaces from a source file. +It is enabled by using the -source flag. Other flags that +may be useful in this mode are -imports and -aux_files. +Example: + mockgen -source=foo.go [other options] + +Reflect mode generates mock interfaces by building a program +that uses reflection to understand interfaces. It is enabled +by passing two non-flag arguments: an import path, and a +comma-separated list of symbols. +Example: + mockgen database/sql/driver Conn,Driver + +` + +type generator struct { + buf bytes.Buffer + indent string + mockNames map[string]string // may be empty + filename string // may be empty + destination string // may be empty + srcPackage, srcInterfaces string // may be empty + copyrightHeader string + + packageMap map[string]string // map from import path to package name +} + +func (g *generator) p(format string, args ...any) { + fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
+} + +func (g *generator) in() { + g.indent += "\t" +} + +func (g *generator) out() { + if len(g.indent) > 0 { + g.indent = g.indent[0 : len(g.indent)-1] + } +} + +// sanitize cleans up a string to make a suitable package name. +func sanitize(s string) string { + t := "" + for _, r := range s { + if t == "" { + if unicode.IsLetter(r) || r == '_' { + t += string(r) + continue + } + } else { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + t += string(r) + continue + } + } + t += "_" + } + if t == "_" { + t = "x" + } + return t +} + +func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPackagePath string) error { + if outputPkgName != pkg.Name && *selfPackage == "" { + // reset outputPackagePath if it's not passed in through -self_package + outputPackagePath = "" + } + + if g.copyrightHeader != "" { + lines := strings.Split(g.copyrightHeader, "\n") + for _, line := range lines { + g.p("// %s", line) + } + g.p("") + } + + g.p("// Code generated by MockGen. DO NOT EDIT.") + if *writeSourceComment { + if g.filename != "" { + g.p("// Source: %v", g.filename) + } else { + g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces) + } + } + g.p("//") + g.p("// Generated by this command:") + g.p("//") + // only log the name of the executable, not the full path + name := filepath.Base(os.Args[0]) + if runtime.GOOS == "windows" { + name = strings.TrimSuffix(name, ".exe") + } + g.p("//\t%v", strings.Join(append([]string{name}, os.Args[1:]...), " ")) + g.p("//") + + // Get all required imports, and generate unique names for them all. + im := pkg.Imports() + im[gomockImportPath] = true + + // Only import reflect if it's used. We only use reflect in mocked methods + // so only import if any of the mocked interfaces have methods. + for _, intf := range pkg.Interfaces { + if len(intf.Methods) > 0 { + im["reflect"] = true + break + } + } + + // Sort keys to make import alias generation predictable + sortedPaths := make([]string, len(im)) + x := 0 + for pth := range im { + sortedPaths[x] = pth + x++ + } + sort.Strings(sortedPaths) + + packagesName := createPackageMap(sortedPaths) + + definedImports := make(map[string]string, len(im)) + if *imports != "" { + for _, kv := range strings.Split(*imports, ",") { + eq := strings.Index(kv, "=") + if k, v := kv[:eq], kv[eq+1:]; k != "." { + definedImports[v] = k + } + } + } + + g.packageMap = make(map[string]string, len(im)) + localNames := make(map[string]bool, len(im)) + for _, pth := range sortedPaths { + base, ok := packagesName[pth] + if !ok { + base = sanitize(path.Base(pth)) + } + + // Local names for an imported package can usually be the basename of the import path. + // A couple of situations don't permit that, such as duplicate local names + // (e.g. importing "html/template" and "text/template"), or where the basename is + // a keyword (e.g. "foo/case") or when defining a name for that by using the -imports flag. + // try base0, base1, ... 
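+ // For example, importing both "html/template" and "text/template" leaves the
+ // second one seen as "template0", and a basename that is a keyword, such as
+ // "case", is renamed the same way.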
+ pkgName := base + + if _, ok := definedImports[base]; ok { + pkgName = definedImports[base] + } + + i := 0 + for localNames[pkgName] || token.Lookup(pkgName).IsKeyword() || pkgName == "any" { + pkgName = base + strconv.Itoa(i) + i++ + } + + // Avoid importing package if source pkg == output pkg + if pth == pkg.PkgPath && outputPackagePath == pkg.PkgPath { + continue + } + + g.packageMap[pth] = pkgName + localNames[pkgName] = true + } + + if *writePkgComment { + // Ensure there's an empty line before the package to follow the recommendations: + // https://github.com/golang/go/wiki/CodeReviewComments#package-comments + g.p("") + + g.p("// Package %v is a generated GoMock package.", outputPkgName) + } + g.p("package %v", outputPkgName) + g.p("") + g.p("import (") + g.in() + for pkgPath, pkgName := range g.packageMap { + if pkgPath == outputPackagePath { + continue + } + g.p("%v %q", pkgName, pkgPath) + } + for _, pkgPath := range pkg.DotImports { + g.p(". %q", pkgPath) + } + g.out() + g.p(")") + + if *writeGenerateDirective { + g.p("//go:generate %v", strings.Join(os.Args, " ")) + } + + for _, intf := range pkg.Interfaces { + if err := g.GenerateMockInterface(intf, outputPackagePath); err != nil { + return err + } + } + + return nil +} + +// The name of the mock type to use for the given interface identifier. +func (g *generator) mockName(typeName string) string { + if mockName, ok := g.mockNames[typeName]; ok { + return mockName + } + + return "Mock" + typeName +} + +// formattedTypeParams returns a long and short form of type param info used for +// printing. If analyzing a interface with type param [I any, O any] the result +// will be: +// "[I any, O any]", "[I, O]" +func (g *generator) formattedTypeParams(it *model.Interface, pkgOverride string) (string, string) { + if len(it.TypeParams) == 0 { + return "", "" + } + var long, short strings.Builder + long.WriteString("[") + short.WriteString("[") + for i, v := range it.TypeParams { + if i != 0 { + long.WriteString(", ") + short.WriteString(", ") + } + long.WriteString(v.Name) + short.WriteString(v.Name) + long.WriteString(fmt.Sprintf(" %s", v.Type.String(g.packageMap, pkgOverride))) + } + long.WriteString("]") + short.WriteString("]") + return long.String(), short.String() +} + +func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePath string) error { + mockType := g.mockName(intf.Name) + longTp, shortTp := g.formattedTypeParams(intf, outputPackagePath) + + g.p("") + g.p("// %v is a mock of %v interface.", mockType, intf.Name) + g.p("type %v%v struct {", mockType, longTp) + g.in() + g.p("ctrl *gomock.Controller") + g.p("recorder *%vMockRecorder%v", mockType, shortTp) + g.out() + g.p("}") + g.p("") + + g.p("// %vMockRecorder is the mock recorder for %v.", mockType, mockType) + g.p("type %vMockRecorder%v struct {", mockType, longTp) + g.in() + g.p("mock *%v%v", mockType, shortTp) + g.out() + g.p("}") + g.p("") + + g.p("// New%v creates a new mock instance.", mockType) + g.p("func New%v%v(ctrl *gomock.Controller) *%v%v {", mockType, longTp, mockType, shortTp) + g.in() + g.p("mock := &%v%v{ctrl: ctrl}", mockType, shortTp) + g.p("mock.recorder = &%vMockRecorder%v{mock}", mockType, shortTp) + g.p("return mock") + g.out() + g.p("}") + g.p("") + + // XXX: possible name collision here if someone has EXPECT in their interface. 
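+ // (An interface that declares its own EXPECT method would yield a duplicate
+ // method in the generated mock, which would not compile.)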
+ g.p("// EXPECT returns an object that allows the caller to indicate expected use.") + g.p("func (m *%v%v) EXPECT() *%vMockRecorder%v {", mockType, shortTp, mockType, shortTp) + g.in() + g.p("return m.recorder") + g.out() + g.p("}") + + g.GenerateMockMethods(mockType, intf, outputPackagePath, longTp, shortTp, *typed) + + return nil +} + +type byMethodName []*model.Method + +func (b byMethodName) Len() int { return len(b) } +func (b byMethodName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byMethodName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride, longTp, shortTp string, typed bool) { + sort.Sort(byMethodName(intf.Methods)) + for _, m := range intf.Methods { + g.p("") + _ = g.GenerateMockMethod(mockType, m, pkgOverride, shortTp) + g.p("") + _ = g.GenerateMockRecorderMethod(intf, m, shortTp, typed) + if typed { + g.p("") + _ = g.GenerateMockReturnCallMethod(intf, m, pkgOverride, longTp, shortTp) + } + } +} + +func makeArgString(argNames, argTypes []string) string { + args := make([]string, len(argNames)) + for i, name := range argNames { + // specify the type only once for consecutive args of the same type + if i+1 < len(argTypes) && argTypes[i] == argTypes[i+1] { + args[i] = name + } else { + args[i] = name + " " + argTypes[i] + } + } + return strings.Join(args, ", ") +} + +// GenerateMockMethod generates a mock method implementation. +// If non-empty, pkgOverride is the package in which unqualified types reside. +func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOverride, shortTp string) error { + argNames := g.getArgNames(m, true /* in */) + argTypes := g.getArgTypes(m, pkgOverride, true /* in */) + argString := makeArgString(argNames, argTypes) + + rets := make([]string, len(m.Out)) + for i, p := range m.Out { + rets[i] = p.Type.String(g.packageMap, pkgOverride) + } + retString := strings.Join(rets, ", ") + if len(rets) > 1 { + retString = "(" + retString + ")" + } + if retString != "" { + retString = " " + retString + } + + ia := newIdentifierAllocator(argNames) + idRecv := ia.allocateIdentifier("m") + + g.p("// %v mocks base method.", m.Name) + g.p("func (%v *%v%v) %v(%v)%v {", idRecv, mockType, shortTp, m.Name, argString, retString) + g.in() + g.p("%s.ctrl.T.Helper()", idRecv) + + var callArgs string + if m.Variadic == nil { + if len(argNames) > 0 { + callArgs = ", " + strings.Join(argNames, ", ") + } + } else { + // Non-trivial. The generated code must build a []any, + // but the variadic argument may be any type. + idVarArgs := ia.allocateIdentifier("varargs") + idVArg := ia.allocateIdentifier("a") + g.p("%s := []any{%s}", idVarArgs, strings.Join(argNames[:len(argNames)-1], ", ")) + g.p("for _, %s := range %s {", idVArg, argNames[len(argNames)-1]) + g.in() + g.p("%s = append(%s, %s)", idVarArgs, idVarArgs, idVArg) + g.out() + g.p("}") + callArgs = ", " + idVarArgs + "..." + } + if len(m.Out) == 0 { + g.p(`%v.ctrl.Call(%v, %q%v)`, idRecv, idRecv, m.Name, callArgs) + } else { + idRet := ia.allocateIdentifier("ret") + g.p(`%v := %v.ctrl.Call(%v, %q%v)`, idRet, idRecv, idRecv, m.Name, callArgs) + + // Go does not allow "naked" type assertions on nil values, so we use the two-value form here. + // The value of that is either (x.(T), true) or (Z, false), where Z is the zero value for T. + // Happily, this coincides with the semantics we want here. 
+ retNames := make([]string, len(rets)) + for i, t := range rets { + retNames[i] = ia.allocateIdentifier(fmt.Sprintf("ret%d", i)) + g.p("%s, _ := %s[%d].(%s)", retNames[i], idRet, i, t) + } + g.p("return " + strings.Join(retNames, ", ")) + } + + g.out() + g.p("}") + return nil +} + +func (g *generator) GenerateMockRecorderMethod(intf *model.Interface, m *model.Method, shortTp string, typed bool) error { + mockType := g.mockName(intf.Name) + argNames := g.getArgNames(m, true) + + var argString string + if m.Variadic == nil { + argString = strings.Join(argNames, ", ") + } else { + argString = strings.Join(argNames[:len(argNames)-1], ", ") + } + if argString != "" { + argString += " any" + } + + if m.Variadic != nil { + if argString != "" { + argString += ", " + } + argString += fmt.Sprintf("%s ...any", argNames[len(argNames)-1]) + } + + ia := newIdentifierAllocator(argNames) + idRecv := ia.allocateIdentifier("mr") + + g.p("// %v indicates an expected call of %v.", m.Name, m.Name) + if typed { + g.p("func (%s *%vMockRecorder%v) %v(%v) *%s%sCall%s {", idRecv, mockType, shortTp, m.Name, argString, mockType, m.Name, shortTp) + } else { + g.p("func (%s *%vMockRecorder%v) %v(%v) *gomock.Call {", idRecv, mockType, shortTp, m.Name, argString) + } + + g.in() + g.p("%s.mock.ctrl.T.Helper()", idRecv) + + var callArgs string + if m.Variadic == nil { + if len(argNames) > 0 { + callArgs = ", " + strings.Join(argNames, ", ") + } + } else { + if len(argNames) == 1 { + // Easy: just use ... to push the arguments through. + callArgs = ", " + argNames[0] + "..." + } else { + // Hard: create a temporary slice. + idVarArgs := ia.allocateIdentifier("varargs") + g.p("%s := append([]any{%s}, %s...)", + idVarArgs, + strings.Join(argNames[:len(argNames)-1], ", "), + argNames[len(argNames)-1]) + callArgs = ", " + idVarArgs + "..." 
+ } + } + if typed { + g.p(`call := %s.mock.ctrl.RecordCallWithMethodType(%s.mock, "%s", reflect.TypeOf((*%s%s)(nil).%s)%s)`, idRecv, idRecv, m.Name, mockType, shortTp, m.Name, callArgs) + g.p(`return &%s%sCall%s{Call: call}`, mockType, m.Name, shortTp) + } else { + g.p(`return %s.mock.ctrl.RecordCallWithMethodType(%s.mock, "%s", reflect.TypeOf((*%s%s)(nil).%s)%s)`, idRecv, idRecv, m.Name, mockType, shortTp, m.Name, callArgs) + } + + g.out() + g.p("}") + return nil +} + +func (g *generator) GenerateMockReturnCallMethod(intf *model.Interface, m *model.Method, pkgOverride, longTp, shortTp string) error { + mockType := g.mockName(intf.Name) + argNames := g.getArgNames(m, true /* in */) + retNames := g.getArgNames(m, false /* out */) + argTypes := g.getArgTypes(m, pkgOverride, true /* in */) + retTypes := g.getArgTypes(m, pkgOverride, false /* out */) + argString := strings.Join(argTypes, ", ") + + rets := make([]string, len(m.Out)) + for i, p := range m.Out { + rets[i] = p.Type.String(g.packageMap, pkgOverride) + } + + var retString string + switch { + case len(rets) == 1: + retString = " " + rets[0] + case len(rets) > 1: + retString = " (" + strings.Join(rets, ", ") + ")" + } + + ia := newIdentifierAllocator(argNames) + idRecv := ia.allocateIdentifier("c") + + recvStructName := mockType + m.Name + + g.p("// %s%sCall wrap *gomock.Call", mockType, m.Name) + g.p("type %s%sCall%s struct{", mockType, m.Name, longTp) + g.in() + g.p("*gomock.Call") + g.out() + g.p("}") + + g.p("// Return rewrite *gomock.Call.Return") + g.p("func (%s *%sCall%s) Return(%v) *%sCall%s {", idRecv, recvStructName, shortTp, makeArgString(retNames, retTypes), recvStructName, shortTp) + g.in() + var retArgs string + if len(retNames) > 0 { + retArgs = strings.Join(retNames, ", ") + } + g.p(`%s.Call = %v.Call.Return(%v)`, idRecv, idRecv, retArgs) + g.p("return %s", idRecv) + g.out() + g.p("}") + + g.p("// Do rewrite *gomock.Call.Do") + g.p("func (%s *%sCall%s) Do(f func(%v)%v) *%sCall%s {", idRecv, recvStructName, shortTp, argString, retString, recvStructName, shortTp) + g.in() + g.p(`%s.Call = %v.Call.Do(f)`, idRecv, idRecv) + g.p("return %s", idRecv) + g.out() + g.p("}") + + g.p("// DoAndReturn rewrite *gomock.Call.DoAndReturn") + g.p("func (%s *%sCall%s) DoAndReturn(f func(%v)%v) *%sCall%s {", idRecv, recvStructName, shortTp, argString, retString, recvStructName, shortTp) + g.in() + g.p(`%s.Call = %v.Call.DoAndReturn(f)`, idRecv, idRecv) + g.p("return %s", idRecv) + g.out() + g.p("}") + return nil +} + +func (g *generator) getArgNames(m *model.Method, in bool) []string { + var params []*model.Parameter + if in { + params = m.In + } else { + params = m.Out + } + argNames := make([]string, len(params)) + for i, p := range params { + name := p.Name + if name == "" || name == "_" { + name = fmt.Sprintf("arg%d", i) + } + argNames[i] = name + } + if m.Variadic != nil && in { + name := m.Variadic.Name + if name == "" { + name = fmt.Sprintf("arg%d", len(params)) + } + argNames = append(argNames, name) + } + return argNames +} + +func (g *generator) getArgTypes(m *model.Method, pkgOverride string, in bool) []string { + var params []*model.Parameter + if in { + params = m.In + } else { + params = m.Out + } + argTypes := make([]string, len(params)) + for i, p := range params { + argTypes[i] = p.Type.String(g.packageMap, pkgOverride) + } + if m.Variadic != nil { + argTypes = append(argTypes, "..."+m.Variadic.Type.String(g.packageMap, pkgOverride)) + } + return argTypes +} + +type identifierAllocator map[string]struct{} + +func 
newIdentifierAllocator(taken []string) identifierAllocator { + a := make(identifierAllocator, len(taken)) + for _, s := range taken { + a[s] = struct{}{} + } + return a +} + +func (o identifierAllocator) allocateIdentifier(want string) string { + id := want + for i := 2; ; i++ { + if _, ok := o[id]; !ok { + o[id] = struct{}{} + return id + } + id = want + "_" + strconv.Itoa(i) + } +} + +// Output returns the generator's output, formatted in the standard Go style. +func (g *generator) Output() []byte { + src, err := toolsimports.Process(g.destination, g.buf.Bytes(), nil) + if err != nil { + log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String()) + } + return src +} + +// createPackageMap returns a map of import path to package name +// for specified importPaths. +func createPackageMap(importPaths []string) map[string]string { + var pkg struct { + Name string + ImportPath string + } + pkgMap := make(map[string]string) + b := bytes.NewBuffer(nil) + args := []string{"list", "-json"} + args = append(args, importPaths...) + cmd := exec.Command("go", args...) + cmd.Stdout = b + cmd.Run() + dec := json.NewDecoder(b) + for dec.More() { + err := dec.Decode(&pkg) + if err != nil { + log.Printf("failed to decode 'go list' output: %v", err) + continue + } + pkgMap[pkg.ImportPath] = pkg.Name + } + return pkgMap +} + +func printVersion() { + if version != "" { + fmt.Printf("v%s\nCommit: %s\nDate: %s\n", version, commit, date) + } else { + printModuleVersion() + } +} + +// parseImportPackage get package import path via source file +// an alternative implementation is to use: +// cfg := &packages.Config{Mode: packages.NeedName, Tests: true, Dir: srcDir} +// pkgs, err := packages.Load(cfg, "file="+source) +// However, it will call "go list" and slow down the performance +func parsePackageImport(srcDir string) (string, error) { + moduleMode := os.Getenv("GO111MODULE") + // trying to find the module + if moduleMode != "off" { + currentDir := srcDir + for { + dat, err := os.ReadFile(filepath.Join(currentDir, "go.mod")) + if os.IsNotExist(err) { + if currentDir == filepath.Dir(currentDir) { + // at the root + break + } + currentDir = filepath.Dir(currentDir) + continue + } else if err != nil { + return "", err + } + modulePath := modfile.ModulePath(dat) + return filepath.ToSlash(filepath.Join(modulePath, strings.TrimPrefix(srcDir, currentDir))), nil + } + } + // fall back to GOPATH mode + goPaths := os.Getenv("GOPATH") + if goPaths == "" { + return "", fmt.Errorf("GOPATH is not set") + } + goPathList := strings.Split(goPaths, string(os.PathListSeparator)) + for _, goPath := range goPathList { + sourceRoot := filepath.Join(goPath, "src") + string(os.PathSeparator) + if strings.HasPrefix(srcDir, sourceRoot) { + return filepath.ToSlash(strings.TrimPrefix(srcDir, sourceRoot)), nil + } + } + return "", errOutsideGoPath +} diff --git a/vendor/go.uber.org/mock/mockgen/model/model.go b/vendor/go.uber.org/mock/mockgen/model/model.go new file mode 100644 index 00000000..853dbf2d --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/model/model.go @@ -0,0 +1,533 @@ +// Copyright 2012 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains the data model necessary for generating mock implementations. +package model + +import ( + "encoding/gob" + "fmt" + "io" + "reflect" + "strings" +) + +// pkgPath is the importable path for package model +const pkgPath = "go.uber.org/mock/mockgen/model" + +// Package is a Go package. It may be a subset. +type Package struct { + Name string + PkgPath string + Interfaces []*Interface + DotImports []string +} + +// Print writes the package name and its exported interfaces. +func (pkg *Package) Print(w io.Writer) { + _, _ = fmt.Fprintf(w, "package %s\n", pkg.Name) + for _, intf := range pkg.Interfaces { + intf.Print(w) + } +} + +// Imports returns the imports needed by the Package as a set of import paths. +func (pkg *Package) Imports() map[string]bool { + im := make(map[string]bool) + for _, intf := range pkg.Interfaces { + intf.addImports(im) + for _, tp := range intf.TypeParams { + tp.Type.addImports(im) + } + } + return im +} + +// Interface is a Go interface. +type Interface struct { + Name string + Methods []*Method + TypeParams []*Parameter +} + +// Print writes the interface name and its methods. +func (intf *Interface) Print(w io.Writer) { + _, _ = fmt.Fprintf(w, "interface %s\n", intf.Name) + for _, m := range intf.Methods { + m.Print(w) + } +} + +func (intf *Interface) addImports(im map[string]bool) { + for _, m := range intf.Methods { + m.addImports(im) + } +} + +// AddMethod adds a new method, de-duplicating by method name. +func (intf *Interface) AddMethod(m *Method) { + for _, me := range intf.Methods { + if me.Name == m.Name { + return + } + } + intf.Methods = append(intf.Methods, m) +} + +// Method is a single method of an interface. +type Method struct { + Name string + In, Out []*Parameter + Variadic *Parameter // may be nil +} + +// Print writes the method name and its signature. +func (m *Method) Print(w io.Writer) { + _, _ = fmt.Fprintf(w, " - method %s\n", m.Name) + if len(m.In) > 0 { + _, _ = fmt.Fprintf(w, " in:\n") + for _, p := range m.In { + p.Print(w) + } + } + if m.Variadic != nil { + _, _ = fmt.Fprintf(w, " ...:\n") + m.Variadic.Print(w) + } + if len(m.Out) > 0 { + _, _ = fmt.Fprintf(w, " out:\n") + for _, p := range m.Out { + p.Print(w) + } + } +} + +func (m *Method) addImports(im map[string]bool) { + for _, p := range m.In { + p.Type.addImports(im) + } + if m.Variadic != nil { + m.Variadic.Type.addImports(im) + } + for _, p := range m.Out { + p.Type.addImports(im) + } +} + +// Parameter is an argument or return parameter of a method. +type Parameter struct { + Name string // may be empty + Type Type +} + +// Print writes a method parameter. +func (p *Parameter) Print(w io.Writer) { + n := p.Name + if n == "" { + n = `""` + } + _, _ = fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, "")) +} + +// Type is a Go type. +type Type interface { + String(pm map[string]string, pkgOverride string) string + addImports(im map[string]bool) +} + +func init() { + // Call gob.RegisterName with pkgPath as prefix to avoid conflicting with + // github.com/golang/mock/mockgen/model 's registration. 
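+ // The effective registered name is e.g. "go.uber.org/mock/mockgen/model.ArrayType",
+ // so gob streams from this package and from github.com/golang/mock can coexist
+ // in a single process.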
+ gob.RegisterName(pkgPath+".ArrayType", &ArrayType{}) + gob.RegisterName(pkgPath+".ChanType", &ChanType{}) + gob.RegisterName(pkgPath+".FuncType", &FuncType{}) + gob.RegisterName(pkgPath+".MapType", &MapType{}) + gob.RegisterName(pkgPath+".NamedType", &NamedType{}) + gob.RegisterName(pkgPath+".PointerType", &PointerType{}) + + // Call gob.RegisterName to make sure it has the consistent name registered + // for both gob decoder and encoder. + // + // For a non-pointer type, gob.Register will try to get package full path by + // calling rt.PkgPath() for a name to register. If your project has vendor + // directory, it is possible that PkgPath will get a path like this: + // ../../../vendor/go.uber.org/mock/mockgen/model + gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType("")) +} + +// ArrayType is an array or slice type. +type ArrayType struct { + Len int // -1 for slices, >= 0 for arrays + Type Type +} + +func (at *ArrayType) String(pm map[string]string, pkgOverride string) string { + s := "[]" + if at.Len > -1 { + s = fmt.Sprintf("[%d]", at.Len) + } + return s + at.Type.String(pm, pkgOverride) +} + +func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) } + +// ChanType is a channel type. +type ChanType struct { + Dir ChanDir // 0, 1 or 2 + Type Type +} + +func (ct *ChanType) String(pm map[string]string, pkgOverride string) string { + s := ct.Type.String(pm, pkgOverride) + if ct.Dir == RecvDir { + return "<-chan " + s + } + if ct.Dir == SendDir { + return "chan<- " + s + } + return "chan " + s +} + +func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) } + +// ChanDir is a channel direction. +type ChanDir int + +// Constants for channel directions. +const ( + RecvDir ChanDir = 1 + SendDir ChanDir = 2 +) + +// FuncType is a function type. +type FuncType struct { + In, Out []*Parameter + Variadic *Parameter // may be nil +} + +func (ft *FuncType) String(pm map[string]string, pkgOverride string) string { + args := make([]string, len(ft.In)) + for i, p := range ft.In { + args[i] = p.Type.String(pm, pkgOverride) + } + if ft.Variadic != nil { + args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride)) + } + rets := make([]string, len(ft.Out)) + for i, p := range ft.Out { + rets[i] = p.Type.String(pm, pkgOverride) + } + retString := strings.Join(rets, ", ") + if nOut := len(ft.Out); nOut == 1 { + retString = " " + retString + } else if nOut > 1 { + retString = " (" + retString + ")" + } + return "func(" + strings.Join(args, ", ") + ")" + retString +} + +func (ft *FuncType) addImports(im map[string]bool) { + for _, p := range ft.In { + p.Type.addImports(im) + } + if ft.Variadic != nil { + ft.Variadic.Type.addImports(im) + } + for _, p := range ft.Out { + p.Type.addImports(im) + } +} + +// MapType is a map type. +type MapType struct { + Key, Value Type +} + +func (mt *MapType) String(pm map[string]string, pkgOverride string) string { + return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride) +} + +func (mt *MapType) addImports(im map[string]bool) { + mt.Key.addImports(im) + mt.Value.addImports(im) +} + +// NamedType is an exported type in a package. +type NamedType struct { + Package string // may be empty + Type string + TypeParams *TypeParametersType +} + +func (nt *NamedType) String(pm map[string]string, pkgOverride string) string { + if pkgOverride == nt.Package { + return nt.Type + nt.TypeParams.String(pm, pkgOverride) + } + prefix := pm[nt.Package] + if prefix != "" { + return prefix + "." 
+ nt.Type + nt.TypeParams.String(pm, pkgOverride) + } + + return nt.Type + nt.TypeParams.String(pm, pkgOverride) +} + +func (nt *NamedType) addImports(im map[string]bool) { + if nt.Package != "" { + im[nt.Package] = true + } + nt.TypeParams.addImports(im) +} + +// PointerType is a pointer to another type. +type PointerType struct { + Type Type +} + +func (pt *PointerType) String(pm map[string]string, pkgOverride string) string { + return "*" + pt.Type.String(pm, pkgOverride) +} +func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) } + +// PredeclaredType is a predeclared type such as "int". +type PredeclaredType string + +func (pt PredeclaredType) String(map[string]string, string) string { return string(pt) } +func (pt PredeclaredType) addImports(map[string]bool) {} + +// TypeParametersType contains type parameters for a NamedType. +type TypeParametersType struct { + TypeParameters []Type +} + +func (tp *TypeParametersType) String(pm map[string]string, pkgOverride string) string { + if tp == nil || len(tp.TypeParameters) == 0 { + return "" + } + var sb strings.Builder + sb.WriteString("[") + for i, v := range tp.TypeParameters { + if i != 0 { + sb.WriteString(", ") + } + sb.WriteString(v.String(pm, pkgOverride)) + } + sb.WriteString("]") + return sb.String() +} + +func (tp *TypeParametersType) addImports(im map[string]bool) { + if tp == nil { + return + } + for _, v := range tp.TypeParameters { + v.addImports(im) + } +} + +// The following code is intended to be called by the program generated by ../reflect.go. + +// InterfaceFromInterfaceType returns a pointer to an interface for the +// given reflection interface type. +func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) { + if it.Kind() != reflect.Interface { + return nil, fmt.Errorf("%v is not an interface", it) + } + intf := &Interface{} + + for i := 0; i < it.NumMethod(); i++ { + mt := it.Method(i) + // TODO: need to skip unexported methods? or just raise an error? + m := &Method{ + Name: mt.Name, + } + + var err error + m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type) + if err != nil { + return nil, err + } + + intf.AddMethod(m) + } + + return intf, nil +} + +// t's Kind must be a reflect.Func. +func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) { + nin := t.NumIn() + if t.IsVariadic() { + nin-- + } + var p *Parameter + for i := 0; i < nin; i++ { + p, err = parameterFromType(t.In(i)) + if err != nil { + return + } + in = append(in, p) + } + if t.IsVariadic() { + p, err = parameterFromType(t.In(nin).Elem()) + if err != nil { + return + } + variadic = p + } + for i := 0; i < t.NumOut(); i++ { + p, err = parameterFromType(t.Out(i)) + if err != nil { + return + } + out = append(out, p) + } + return +} + +func parameterFromType(t reflect.Type) (*Parameter, error) { + tt, err := typeFromType(t) + if err != nil { + return nil, err + } + return &Parameter{Type: tt}, nil +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var byteType = reflect.TypeOf(byte(0)) + +func typeFromType(t reflect.Type) (Type, error) { + // Hack workaround for https://golang.org/issue/3853. + // This explicit check should not be necessary. + if t == byteType { + return PredeclaredType("byte"), nil + } + + if imp := t.PkgPath(); imp != "" { + return &NamedType{ + Package: impPath(imp), + Type: t.Name(), + }, nil + } + + // only unnamed or predeclared types after here + + // Lots of types have element types. 
Let's do the parsing and error checking for all of them. + var elemType Type + switch t.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice: + var err error + elemType, err = typeFromType(t.Elem()) + if err != nil { + return nil, err + } + } + + switch t.Kind() { + case reflect.Array: + return &ArrayType{ + Len: t.Len(), + Type: elemType, + }, nil + case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String: + return PredeclaredType(t.Kind().String()), nil + case reflect.Chan: + var dir ChanDir + switch t.ChanDir() { + case reflect.RecvDir: + dir = RecvDir + case reflect.SendDir: + dir = SendDir + } + return &ChanType{ + Dir: dir, + Type: elemType, + }, nil + case reflect.Func: + in, variadic, out, err := funcArgsFromType(t) + if err != nil { + return nil, err + } + return &FuncType{ + In: in, + Out: out, + Variadic: variadic, + }, nil + case reflect.Interface: + // Two special interfaces. + if t.NumMethod() == 0 { + return PredeclaredType("any"), nil + } + if t == errorType { + return PredeclaredType("error"), nil + } + case reflect.Map: + kt, err := typeFromType(t.Key()) + if err != nil { + return nil, err + } + return &MapType{ + Key: kt, + Value: elemType, + }, nil + case reflect.Ptr: + return &PointerType{ + Type: elemType, + }, nil + case reflect.Slice: + return &ArrayType{ + Len: -1, + Type: elemType, + }, nil + case reflect.Struct: + if t.NumField() == 0 { + return PredeclaredType("struct{}"), nil + } + } + + // TODO: Struct, UnsafePointer + return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind()) +} + +// impPath sanitizes the package path returned by `PkgPath` method of a reflect Type so that +// it is importable. PkgPath might return a path that includes "vendor". These paths do not +// compile, so we need to remove everything up to and including "/vendor/". +// See https://github.com/golang/go/issues/12019. +func impPath(imp string) string { + if strings.HasPrefix(imp, "vendor/") { + imp = "/" + imp + } + if i := strings.LastIndex(imp, "/vendor/"); i != -1 { + imp = imp[i+len("/vendor/"):] + } + return imp +} + +// ErrorInterface represent built-in error interface. +var ErrorInterface = Interface{ + Name: "error", + Methods: []*Method{ + { + Name: "Error", + Out: []*Parameter{ + { + Name: "", + Type: PredeclaredType("string"), + }, + }, + }, + }, +} diff --git a/vendor/go.uber.org/mock/mockgen/parse.go b/vendor/go.uber.org/mock/mockgen/parse.go new file mode 100644 index 00000000..f43321c3 --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/parse.go @@ -0,0 +1,805 @@ +// Copyright 2012 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +// This file contains the model construction by parsing source files. 
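+// An illustrative invocation of source mode (file names and import paths
+// here are hypothetical), using the -source, -imports and -aux_files flags
+// that this file handles:
+//
+//	mockgen -source=db.go
+//	mockgen -source=db.go -imports .=example.com/pkg -aux_files stub=stub/stub.go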
+ +import ( + "errors" + "fmt" + "go/ast" + "go/build" + "go/importer" + "go/parser" + "go/token" + "go/types" + "log" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "go.uber.org/mock/mockgen/model" +) + +// sourceMode generates mocks via source file. +func sourceMode(source string) (*model.Package, error) { + srcDir, err := filepath.Abs(filepath.Dir(source)) + if err != nil { + return nil, fmt.Errorf("failed getting source directory: %v", err) + } + + packageImport, err := parsePackageImport(srcDir) + if err != nil { + return nil, err + } + + fs := token.NewFileSet() + file, err := parser.ParseFile(fs, source, nil, 0) + if err != nil { + return nil, fmt.Errorf("failed parsing source file %v: %v", source, err) + } + + p := &fileParser{ + fileSet: fs, + imports: make(map[string]importedPackage), + importedInterfaces: newInterfaceCache(), + auxInterfaces: newInterfaceCache(), + srcDir: srcDir, + } + + // Handle -imports. + dotImports := make(map[string]bool) + if *imports != "" { + for _, kv := range strings.Split(*imports, ",") { + eq := strings.Index(kv, "=") + k, v := kv[:eq], kv[eq+1:] + if k == "." { + dotImports[v] = true + } else { + p.imports[k] = importedPkg{path: v} + } + } + } + + if *excludeInterfaces != "" { + p.excludeNamesSet = parseExcludeInterfaces(*excludeInterfaces) + } + + // Handle -aux_files. + if err := p.parseAuxFiles(*auxFiles); err != nil { + return nil, err + } + p.addAuxInterfacesFromFile(packageImport, file) // this file + + pkg, err := p.parseFile(packageImport, file) + if err != nil { + return nil, err + } + for pkgPath := range dotImports { + pkg.DotImports = append(pkg.DotImports, pkgPath) + } + return pkg, nil +} + +type importedPackage interface { + Path() string + Parser() *fileParser +} + +type importedPkg struct { + path string + parser *fileParser +} + +func (i importedPkg) Path() string { return i.path } +func (i importedPkg) Parser() *fileParser { return i.parser } + +// duplicateImport is a bit of a misnomer. Currently the parser can't +// handle cases of multi-file packages importing different packages +// under the same name. Often these imports would not be problematic, +// so this type lets us defer raising an error unless the package name +// is actually used. 
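+// For example (hypothetical paths), one file importing "example.com/a/util"
+// and a sibling file importing "example.com/b/util" both bind the name
+// "util"; the ambiguity is only fatal if "util" is actually referenced.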
+type duplicateImport struct { + name string + duplicates []string +} + +func (d duplicateImport) Error() string { + return fmt.Sprintf("%q is ambiguous because of duplicate imports: %v", d.name, d.duplicates) +} + +func (d duplicateImport) Path() string { log.Fatal(d.Error()); return "" } +func (d duplicateImport) Parser() *fileParser { log.Fatal(d.Error()); return nil } + +type interfaceCache struct { + m map[string]map[string]*namedInterface +} + +func newInterfaceCache() *interfaceCache { + return &interfaceCache{ + m: make(map[string]map[string]*namedInterface), + } +} + +func (i *interfaceCache) Set(pkg, name string, it *namedInterface) { + if _, ok := i.m[pkg]; !ok { + i.m[pkg] = make(map[string]*namedInterface) + } + i.m[pkg][name] = it +} + +func (i *interfaceCache) Get(pkg, name string) *namedInterface { + if _, ok := i.m[pkg]; !ok { + return nil + } + return i.m[pkg][name] +} + +func (i *interfaceCache) GetASTIface(pkg, name string) *ast.InterfaceType { + if _, ok := i.m[pkg]; !ok { + return nil + } + it, ok := i.m[pkg][name] + if !ok { + return nil + } + return it.it +} + +type fileParser struct { + fileSet *token.FileSet + imports map[string]importedPackage // package name => imported package + importedInterfaces *interfaceCache + auxFiles []*ast.File + auxInterfaces *interfaceCache + srcDir string + excludeNamesSet map[string]struct{} +} + +func (p *fileParser) errorf(pos token.Pos, format string, args ...any) error { + ps := p.fileSet.Position(pos) + format = "%s:%d:%d: " + format + args = append([]any{ps.Filename, ps.Line, ps.Column}, args...) + return fmt.Errorf(format, args...) +} + +func (p *fileParser) parseAuxFiles(auxFiles string) error { + auxFiles = strings.TrimSpace(auxFiles) + if auxFiles == "" { + return nil + } + for _, kv := range strings.Split(auxFiles, ",") { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("bad aux file spec: %v", kv) + } + pkg, fpath := parts[0], parts[1] + + file, err := parser.ParseFile(p.fileSet, fpath, nil, 0) + if err != nil { + return err + } + p.auxFiles = append(p.auxFiles, file) + p.addAuxInterfacesFromFile(pkg, file) + } + return nil +} + +func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) { + for ni := range iterInterfaces(file) { + p.auxInterfaces.Set(pkg, ni.name.Name, ni) + } +} + +// parseFile loads all file imports and auxiliary files import into the +// fileParser, parses all file interfaces and returns package model. +func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Package, error) { + allImports, dotImports := importsOfFile(file) + // Don't stomp imports provided by -imports. Those should take precedence. + for pkg, pkgI := range allImports { + if _, ok := p.imports[pkg]; !ok { + p.imports[pkg] = pkgI + } + } + // Add imports from auxiliary files, which might be needed for embedded interfaces. + // Don't stomp any other imports. 
+ for _, f := range p.auxFiles { + auxImports, _ := importsOfFile(f) + for pkg, pkgI := range auxImports { + if _, ok := p.imports[pkg]; !ok { + p.imports[pkg] = pkgI + } + } + } + + var is []*model.Interface + for ni := range iterInterfaces(file) { + if _, ok := p.excludeNamesSet[ni.name.String()]; ok { + continue + } + i, err := p.parseInterface(ni.name.String(), importPath, ni) + if errors.Is(err, errConstraintInterface) { + continue + } + if err != nil { + return nil, err + } + is = append(is, i) + } + return &model.Package{ + Name: file.Name.String(), + PkgPath: importPath, + Interfaces: is, + DotImports: dotImports, + }, nil +} + +// parsePackage loads package specified by path, parses it and returns +// a new fileParser with the parsed imports and interfaces. +func (p *fileParser) parsePackage(path string) (*fileParser, error) { + newP := &fileParser{ + fileSet: token.NewFileSet(), + imports: make(map[string]importedPackage), + importedInterfaces: newInterfaceCache(), + auxInterfaces: newInterfaceCache(), + srcDir: p.srcDir, + } + + var pkgs map[string]*ast.Package + if imp, err := build.Import(path, newP.srcDir, build.FindOnly); err != nil { + return nil, err + } else if pkgs, err = parser.ParseDir(newP.fileSet, imp.Dir, nil, 0); err != nil { + return nil, err + } + + for _, pkg := range pkgs { + file := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates|ast.FilterUnassociatedComments|ast.FilterImportDuplicates) + for ni := range iterInterfaces(file) { + newP.importedInterfaces.Set(path, ni.name.Name, ni) + } + imports, _ := importsOfFile(file) + for pkgName, pkgI := range imports { + newP.imports[pkgName] = pkgI + } + } + return newP, nil +} + +func (p *fileParser) constructInstParams(pkg string, params []*ast.Field, instParams []model.Type, embeddedInstParams []ast.Expr, tps map[string]model.Type) ([]model.Type, error) { + pm := make(map[string]int) + var i int + for _, v := range params { + for _, n := range v.Names { + pm[n.Name] = i + instParams = append(instParams, model.PredeclaredType(n.Name)) + i++ + } + } + + var runtimeInstParams []model.Type + for _, instParam := range embeddedInstParams { + switch t := instParam.(type) { + case *ast.Ident: + if idx, ok := pm[t.Name]; ok { + runtimeInstParams = append(runtimeInstParams, instParams[idx]) + continue + } + } + modelType, err := p.parseType(pkg, instParam, tps) + if err != nil { + return nil, err + } + runtimeInstParams = append(runtimeInstParams, modelType) + } + + return runtimeInstParams, nil +} + +func (p *fileParser) constructTps(it *namedInterface) (tps map[string]model.Type) { + tps = make(map[string]model.Type) + n := 0 + for _, tp := range it.typeParams { + for _, tm := range tp.Names { + tps[tm.Name] = nil + if len(it.instTypes) != 0 { + tps[tm.Name] = it.instTypes[n] + n++ + } + } + } + return tps +} + +// parseInterface loads interface specified by pkg and name, parses it and returns +// a new model with the parsed. 
+func (p *fileParser) parseInterface(name, pkg string, it *namedInterface) (*model.Interface, error) { + iface := &model.Interface{Name: name} + tps := p.constructTps(it) + tp, err := p.parseFieldList(pkg, it.typeParams, tps) + if err != nil { + return nil, fmt.Errorf("unable to parse interface type parameters: %v", name) + } + + iface.TypeParams = tp + for _, field := range it.it.Methods.List { + var methods []*model.Method + if methods, err = p.parseMethod(field, it, iface, pkg, tps); err != nil { + return nil, err + } + for _, m := range methods { + iface.AddMethod(m) + } + } + return iface, nil +} + +func (p *fileParser) parseMethod(field *ast.Field, it *namedInterface, iface *model.Interface, pkg string, tps map[string]model.Type) ([]*model.Method, error) { + // {} for git diff + { + switch v := field.Type.(type) { + case *ast.FuncType: + if nn := len(field.Names); nn != 1 { + return nil, fmt.Errorf("expected one name for interface %v, got %d", iface.Name, nn) + } + m := &model.Method{ + Name: field.Names[0].String(), + } + var err error + m.In, m.Variadic, m.Out, err = p.parseFunc(pkg, v, tps) + if err != nil { + return nil, err + } + return []*model.Method{m}, nil + case *ast.Ident: + // Embedded interface in this package. + embeddedIfaceType := p.auxInterfaces.Get(pkg, v.String()) + if embeddedIfaceType == nil { + embeddedIfaceType = p.importedInterfaces.Get(pkg, v.String()) + } + + var embeddedIface *model.Interface + if embeddedIfaceType != nil { + var err error + embeddedIfaceType.instTypes, err = p.constructInstParams(pkg, it.typeParams, it.instTypes, it.embeddedInstTypeParams, tps) + if err != nil { + return nil, err + } + embeddedIface, err = p.parseInterface(v.String(), pkg, embeddedIfaceType) + if err != nil { + return nil, err + } + + } else { + // This is built-in error interface. + if v.String() == model.ErrorInterface.Name { + embeddedIface = &model.ErrorInterface + } else { + ip, err := p.parsePackage(pkg) + if err != nil { + return nil, p.errorf(v.Pos(), "could not parse package %s: %v", pkg, err) + } + + if embeddedIfaceType = ip.importedInterfaces.Get(pkg, v.String()); embeddedIfaceType == nil { + return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", pkg, v.String()) + } + + embeddedIfaceType.instTypes, err = p.constructInstParams(pkg, it.typeParams, it.instTypes, it.embeddedInstTypeParams, tps) + if err != nil { + return nil, err + } + embeddedIface, err = ip.parseInterface(v.String(), pkg, embeddedIfaceType) + if err != nil { + return nil, err + } + } + } + return embeddedIface.Methods, nil + case *ast.SelectorExpr: + // Embedded interface in another package. 
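+ // An embedded *ast.SelectorExpr is a qualified name such as io.Reader in:
+ //
+ //	type MyReader interface {
+ //		io.Reader
+ //	}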
+ filePkg, sel := v.X.(*ast.Ident).String(), v.Sel.String() + embeddedPkg, ok := p.imports[filePkg] + if !ok { + return nil, p.errorf(v.X.Pos(), "unknown package %s", filePkg) + } + + var embeddedIface *model.Interface + var err error + embeddedIfaceType := p.auxInterfaces.Get(filePkg, sel) + if embeddedIfaceType != nil { + embeddedIfaceType.instTypes, err = p.constructInstParams(pkg, it.typeParams, it.instTypes, it.embeddedInstTypeParams, tps) + if err != nil { + return nil, err + } + embeddedIface, err = p.parseInterface(sel, filePkg, embeddedIfaceType) + if err != nil { + return nil, err + } + } else { + path := embeddedPkg.Path() + parser := embeddedPkg.Parser() + if parser == nil { + ip, err := p.parsePackage(path) + if err != nil { + return nil, p.errorf(v.Pos(), "could not parse package %s: %v", path, err) + } + parser = ip + p.imports[filePkg] = importedPkg{ + path: embeddedPkg.Path(), + parser: parser, + } + } + if embeddedIfaceType = parser.importedInterfaces.Get(path, sel); embeddedIfaceType == nil { + return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", path, sel) + } + + embeddedIfaceType.instTypes, err = p.constructInstParams(pkg, it.typeParams, it.instTypes, it.embeddedInstTypeParams, tps) + if err != nil { + return nil, err + } + embeddedIface, err = parser.parseInterface(sel, path, embeddedIfaceType) + if err != nil { + return nil, err + } + } + // TODO: apply shadowing rules. + return embeddedIface.Methods, nil + default: + return p.parseGenericMethod(field, it, iface, pkg, tps) + } + } +} + +func (p *fileParser) parseFunc(pkg string, f *ast.FuncType, tps map[string]model.Type) (inParam []*model.Parameter, variadic *model.Parameter, outParam []*model.Parameter, err error) { + if f.Params != nil { + regParams := f.Params.List + if isVariadic(f) { + n := len(regParams) + varParams := regParams[n-1:] + regParams = regParams[:n-1] + vp, err := p.parseFieldList(pkg, varParams, tps) + if err != nil { + return nil, nil, nil, p.errorf(varParams[0].Pos(), "failed parsing variadic argument: %v", err) + } + variadic = vp[0] + } + inParam, err = p.parseFieldList(pkg, regParams, tps) + if err != nil { + return nil, nil, nil, p.errorf(f.Pos(), "failed parsing arguments: %v", err) + } + } + if f.Results != nil { + outParam, err = p.parseFieldList(pkg, f.Results.List, tps) + if err != nil { + return nil, nil, nil, p.errorf(f.Pos(), "failed parsing returns: %v", err) + } + } + return +} + +func (p *fileParser) parseFieldList(pkg string, fields []*ast.Field, tps map[string]model.Type) ([]*model.Parameter, error) { + nf := 0 + for _, f := range fields { + nn := len(f.Names) + if nn == 0 { + nn = 1 // anonymous parameter + } + nf += nn + } + if nf == 0 { + return nil, nil + } + ps := make([]*model.Parameter, nf) + i := 0 // destination index + for _, f := range fields { + t, err := p.parseType(pkg, f.Type, tps) + if err != nil { + return nil, err + } + + if len(f.Names) == 0 { + // anonymous arg + ps[i] = &model.Parameter{Type: t} + i++ + continue + } + for _, name := range f.Names { + ps[i] = &model.Parameter{Name: name.Name, Type: t} + i++ + } + } + return ps, nil +} + +func (p *fileParser) parseType(pkg string, typ ast.Expr, tps map[string]model.Type) (model.Type, error) { + switch v := typ.(type) { + case *ast.ArrayType: + ln := -1 + if v.Len != nil { + value, err := p.parseArrayLength(v.Len) + if err != nil { + return nil, err + } + ln, err = strconv.Atoi(value) + if err != nil { + return nil, p.errorf(v.Len.Pos(), "bad array size: %v", err) + } + } + t, err := 
p.parseType(pkg, v.Elt, tps) + if err != nil { + return nil, err + } + return &model.ArrayType{Len: ln, Type: t}, nil + case *ast.ChanType: + t, err := p.parseType(pkg, v.Value, tps) + if err != nil { + return nil, err + } + var dir model.ChanDir + if v.Dir == ast.SEND { + dir = model.SendDir + } + if v.Dir == ast.RECV { + dir = model.RecvDir + } + return &model.ChanType{Dir: dir, Type: t}, nil + case *ast.Ellipsis: + // assume we're parsing a variadic argument + return p.parseType(pkg, v.Elt, tps) + case *ast.FuncType: + in, variadic, out, err := p.parseFunc(pkg, v, tps) + if err != nil { + return nil, err + } + return &model.FuncType{In: in, Out: out, Variadic: variadic}, nil + case *ast.Ident: + it, ok := tps[v.Name] + if v.IsExported() && !ok { + // `pkg` may be an aliased imported pkg + // if so, patch the import w/ the fully qualified import + maybeImportedPkg, ok := p.imports[pkg] + if ok { + pkg = maybeImportedPkg.Path() + } + // assume type in this package + return &model.NamedType{Package: pkg, Type: v.Name}, nil + } + if ok && it != nil { + return it, nil + } + // assume predeclared type + return model.PredeclaredType(v.Name), nil + case *ast.InterfaceType: + if v.Methods != nil && len(v.Methods.List) > 0 { + return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed interface types") + } + return model.PredeclaredType("any"), nil + case *ast.MapType: + key, err := p.parseType(pkg, v.Key, tps) + if err != nil { + return nil, err + } + value, err := p.parseType(pkg, v.Value, tps) + if err != nil { + return nil, err + } + return &model.MapType{Key: key, Value: value}, nil + case *ast.SelectorExpr: + pkgName := v.X.(*ast.Ident).String() + pkg, ok := p.imports[pkgName] + if !ok { + return nil, p.errorf(v.Pos(), "unknown package %q", pkgName) + } + return &model.NamedType{Package: pkg.Path(), Type: v.Sel.String()}, nil + case *ast.StarExpr: + t, err := p.parseType(pkg, v.X, tps) + if err != nil { + return nil, err + } + return &model.PointerType{Type: t}, nil + case *ast.StructType: + if v.Fields != nil && len(v.Fields.List) > 0 { + return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types") + } + return model.PredeclaredType("struct{}"), nil + case *ast.ParenExpr: + return p.parseType(pkg, v.X, tps) + default: + mt, err := p.parseGenericType(pkg, typ, tps) + if err != nil { + return nil, err + } + if mt == nil { + break + } + return mt, nil + } + + return nil, fmt.Errorf("don't know how to parse type %T", typ) +} + +func (p *fileParser) parseArrayLength(expr ast.Expr) (string, error) { + switch val := expr.(type) { + case (*ast.BasicLit): + return val.Value, nil + case (*ast.Ident): + // when the length is a const defined locally + return val.Obj.Decl.(*ast.ValueSpec).Values[0].(*ast.BasicLit).Value, nil + case (*ast.SelectorExpr): + // when the length is a const defined in an external package + usedPkg, err := importer.Default().Import(fmt.Sprintf("%s", val.X)) + if err != nil { + return "", p.errorf(expr.Pos(), "unknown package in array length: %v", err) + } + ev, err := types.Eval(token.NewFileSet(), usedPkg, token.NoPos, val.Sel.Name) + if err != nil { + return "", p.errorf(expr.Pos(), "unknown constant in array length: %v", err) + } + return ev.Value.String(), nil + case (*ast.ParenExpr): + return p.parseArrayLength(val.X) + case (*ast.BinaryExpr): + x, err := p.parseArrayLength(val.X) + if err != nil { + return "", err + } + y, err := p.parseArrayLength(val.Y) + if err != nil { + return "", err + } + biExpr := fmt.Sprintf("%s%v%s", x, val.Op, y) + tv, err 
:= types.Eval(token.NewFileSet(), nil, token.NoPos, biExpr) + if err != nil { + return "", p.errorf(expr.Pos(), "invalid expression in array length: %v", err) + } + return tv.Value.String(), nil + default: + return "", p.errorf(expr.Pos(), "invalid expression in array length: %v", val) + } +} + +// importsOfFile returns a map of package name to import path +// of the imports in file. +func importsOfFile(file *ast.File) (normalImports map[string]importedPackage, dotImports []string) { + var importPaths []string + for _, is := range file.Imports { + if is.Name != nil { + continue + } + importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes + importPaths = append(importPaths, importPath) + } + packagesName := createPackageMap(importPaths) + normalImports = make(map[string]importedPackage) + dotImports = make([]string, 0) + for _, is := range file.Imports { + var pkgName string + importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes + + if is.Name != nil { + // Named imports are always certain. + if is.Name.Name == "_" { + continue + } + pkgName = is.Name.Name + } else { + pkg, ok := packagesName[importPath] + if !ok { + // Fallback to import path suffix. Note that this is uncertain. + _, last := path.Split(importPath) + // If the last path component has dots, the first dot-delimited + // field is used as the name. + pkgName = strings.SplitN(last, ".", 2)[0] + } else { + pkgName = pkg + } + } + + if pkgName == "." { + dotImports = append(dotImports, importPath) + } else { + if pkg, ok := normalImports[pkgName]; ok { + switch p := pkg.(type) { + case duplicateImport: + normalImports[pkgName] = duplicateImport{ + name: p.name, + duplicates: append([]string{importPath}, p.duplicates...), + } + case importedPkg: + normalImports[pkgName] = duplicateImport{ + name: pkgName, + duplicates: []string{p.path, importPath}, + } + } + } else { + normalImports[pkgName] = importedPkg{path: importPath} + } + } + } + return +} + +type namedInterface struct { + name *ast.Ident + it *ast.InterfaceType + typeParams []*ast.Field + embeddedInstTypeParams []ast.Expr + instTypes []model.Type +} + +// Create an iterator over all interfaces in file. +func iterInterfaces(file *ast.File) <-chan *namedInterface { + ch := make(chan *namedInterface) + go func() { + for _, decl := range file.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.TYPE { + continue + } + for _, spec := range gd.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + it, ok := ts.Type.(*ast.InterfaceType) + if !ok { + continue + } + + ch <- &namedInterface{name: ts.Name, it: it, typeParams: getTypeSpecTypeParams(ts)} + } + } + close(ch) + }() + return ch +} + +// isVariadic returns whether the function is variadic. 
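+// For example, func(format string, args ...any) is variadic, while
+// func(format string, args []any) is not.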
+func isVariadic(f *ast.FuncType) bool { + nargs := len(f.Params.List) + if nargs == 0 { + return false + } + _, ok := f.Params.List[nargs-1].Type.(*ast.Ellipsis) + return ok +} + +// packageNameOfDir get package import path via dir +func packageNameOfDir(srcDir string) (string, error) { + files, err := os.ReadDir(srcDir) + if err != nil { + log.Fatal(err) + } + + var goFilePath string + for _, file := range files { + if !file.IsDir() && strings.HasSuffix(file.Name(), ".go") { + goFilePath = file.Name() + break + } + } + if goFilePath == "" { + return "", fmt.Errorf("go source file not found %s", srcDir) + } + + packageImport, err := parsePackageImport(srcDir) + if err != nil { + return "", err + } + return packageImport, nil +} + +var errOutsideGoPath = errors.New("source directory is outside GOPATH") diff --git a/vendor/go.uber.org/mock/mockgen/reflect.go b/vendor/go.uber.org/mock/mockgen/reflect.go new file mode 100644 index 00000000..ca80ebbb --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/reflect.go @@ -0,0 +1,256 @@ +// Copyright 2012 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +// This file contains the model construction by reflection. + +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "go/build" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "text/template" + + "go.uber.org/mock/mockgen/model" +) + +var ( + progOnly = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout and exit.") + execOnly = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.") + buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.") +) + +// reflectMode generates mocks via reflection on an interface. +func reflectMode(importPath string, symbols []string) (*model.Package, error) { + if *execOnly != "" { + return run(*execOnly) + } + + program, err := writeProgram(importPath, symbols) + if err != nil { + return nil, err + } + + if *progOnly { + if _, err := os.Stdout.Write(program); err != nil { + return nil, err + } + os.Exit(0) + } + + wd, _ := os.Getwd() + + // Try to run the reflection program in the current working directory. + if p, err := runInDir(program, wd); err == nil { + return p, nil + } + + // Try to run the program in the same directory as the input package. + if p, err := build.Import(importPath, wd, build.FindOnly); err == nil { + dir := p.Dir + if p, err := runInDir(program, dir); err == nil { + return p, nil + } + } + + // Try to run it in a standard temp directory. + return runInDir(program, "") +} + +func writeProgram(importPath string, symbols []string) ([]byte, error) { + var program bytes.Buffer + data := reflectData{ + ImportPath: importPath, + Symbols: symbols, + } + if err := reflectProgram.Execute(&program, &data); err != nil { + return nil, err + } + return program.Bytes(), nil +} + +// run the given program and parse the output as a model.Package. 
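+// The program is expected to behave like the reflectProgram template below:
+// it gob-encodes a model.Package to the file named by its -output flag, so
+// run executes it against a temporary file and decodes the result.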
+func run(program string) (*model.Package, error) { + f, err := os.CreateTemp("", "") + if err != nil { + return nil, err + } + + filename := f.Name() + defer os.Remove(filename) + if err := f.Close(); err != nil { + return nil, err + } + + // Run the program. + cmd := exec.Command(program, "-output", filename) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + f, err = os.Open(filename) + if err != nil { + return nil, err + } + + // Process output. + var pkg model.Package + if err := gob.NewDecoder(f).Decode(&pkg); err != nil { + return nil, err + } + + if err := f.Close(); err != nil { + return nil, err + } + + return &pkg, nil +} + +// runInDir writes the given program into the given dir, runs it there, and +// parses the output as a model.Package. +func runInDir(program []byte, dir string) (*model.Package, error) { + // We use TempDir instead of TempFile so we can control the filename. + tmpDir, err := os.MkdirTemp(dir, "gomock_reflect_") + if err != nil { + return nil, err + } + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + log.Printf("failed to remove temp directory: %s", err) + } + }() + const progSource = "prog.go" + var progBinary = "prog.bin" + if runtime.GOOS == "windows" { + // Windows won't execute a program unless it has a ".exe" suffix. + progBinary += ".exe" + } + + if err := os.WriteFile(filepath.Join(tmpDir, progSource), program, 0600); err != nil { + return nil, err + } + + cmdArgs := []string{} + cmdArgs = append(cmdArgs, "build") + if *buildFlags != "" { + cmdArgs = append(cmdArgs, strings.Split(*buildFlags, " ")...) + } + cmdArgs = append(cmdArgs, "-o", progBinary, progSource) + + // Build the program. + buf := bytes.NewBuffer(nil) + cmd := exec.Command("go", cmdArgs...) + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = io.MultiWriter(os.Stderr, buf) + if err := cmd.Run(); err != nil { + sErr := buf.String() + if strings.Contains(sErr, `cannot find package "."`) && + strings.Contains(sErr, "go.uber.org/mock/mockgen/model") { + fmt.Fprint(os.Stderr, "Please reference the steps in the README to fix this error:\n\thttps://go.uber.org/mock#reflect-vendoring-error.\n") + return nil, err + } + return nil, err + } + + return run(filepath.Join(tmpDir, progBinary)) +} + +type reflectData struct { + ImportPath string + Symbols []string +} + +// This program reflects on an interface value, and prints the +// gob encoding of a model.Package to standard output. +// JSON doesn't work because of the model.Type interface. +var reflectProgram = template.Must(template.New("program").Parse(` +// Code generated by MockGen. DO NOT EDIT. +package main + +import ( + "encoding/gob" + "flag" + "fmt" + "os" + "path" + "reflect" + + "go.uber.org/mock/mockgen/model" + + pkg_ {{printf "%q" .ImportPath}} +) + +var output = flag.String("output", "", "The output file name, or empty to use stdout.") + +func main() { + flag.Parse() + + its := []struct{ + sym string + typ reflect.Type + }{ + {{range .Symbols}} + { {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()}, + {{end}} + } + pkg := &model.Package{ + // NOTE: This behaves contrary to documented behaviour if the + // package name is not the final component of the import path. + // The reflect package doesn't expose the package name, though. 
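+ // For example, path.Base("example.com/foo/v2") is "v2", while the
+ // package itself is more likely named "foo".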
+ Name: path.Base({{printf "%q" .ImportPath}}), + } + + for _, it := range its { + intf, err := model.InterfaceFromInterfaceType(it.typ) + if err != nil { + fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) + os.Exit(1) + } + intf.Name = it.sym + pkg.Interfaces = append(pkg.Interfaces, intf) + } + + outfile := os.Stdout + if len(*output) != 0 { + var err error + outfile, err = os.Create(*output) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file %q", *output) + } + defer func() { + if err := outfile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "failed to close output file %q", *output) + os.Exit(1) + } + }() + } + + if err := gob.NewEncoder(outfile).Encode(pkg); err != nil { + fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) + os.Exit(1) + } +} +`)) diff --git a/vendor/go.uber.org/mock/mockgen/version.go b/vendor/go.uber.org/mock/mockgen/version.go new file mode 100644 index 00000000..6db160ac --- /dev/null +++ b/vendor/go.uber.org/mock/mockgen/version.go @@ -0,0 +1,31 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "log" + "runtime/debug" +) + +func printModuleVersion() { + if bi, exists := debug.ReadBuildInfo(); exists { + fmt.Println(bi.Main.Version) + } else { + log.Printf("No version information found. Make sure to use " + + "GO111MODULE=on when running 'go get' in order to use specific " + + "version of the binary.") + } +} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/rand/exp.go b/vendor/golang.org/x/exp/rand/exp.go new file mode 100644 index 00000000..08386727 --- /dev/null +++ b/vendor/golang.org/x/exp/rand/exp.go @@ -0,0 +1,221 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Exponential distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + re = 7.69711747013104972 +) + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1). 
+// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func (r *Rand) ExpFloat64() float64 { + for { + j := r.Uint32() + i := j & 0xFF + x := float64(j) * float64(we[i]) + if j < ke[i] { + return x + } + if i == 0 { + return re - math.Log(r.Float64()) + } + if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) { + return x + } + } +} + +var ke = [256]uint32{ + 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990, + 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8, + 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78, + 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651, + 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca, + 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8, + 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea, + 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba, + 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed, + 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662, + 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3, + 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace, + 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6, + 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7, + 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415, + 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4, + 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36, + 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46, + 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac, + 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245, + 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52, + 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06, + 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0, + 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9, + 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76, + 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516, + 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289, + 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed, + 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb, + 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e, + 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a, + 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1, + 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b, + 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621, + 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d, + 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3, + 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73, + 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88, + 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a, + 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb, + 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176, + 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be, + 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192, + 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed, + 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936, + 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b, + 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4, + 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1, + 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482, + 0xfa839276, 0xfa263b32, 0xf9b72d1c, 
0xf930a1a2, 0xf889f023, + 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d, + 0xe6da6ecf, +} +var we = [256]float32{ + 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11, + 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11, + 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11, + 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11, + 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10, + 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10, + 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10, + 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10, + 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10, + 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10, + 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10, + 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10, + 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10, + 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10, + 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10, + 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10, + 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10, + 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10, + 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10, + 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10, + 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10, + 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10, + 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10, + 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10, + 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10, + 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10, + 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10, + 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10, + 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10, + 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10, + 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10, + 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10, + 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10, + 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10, + 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10, + 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10, + 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10, + 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10, + 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10, + 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10, + 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10, + 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10, + 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10, + 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10, + 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10, + 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10, + 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10, + 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10, + 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10, + 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10, + 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10, + 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10, + 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10, + 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10, + 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10, + 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10, + 8.401528e-10, 8.496445e-10, 
8.594247e-10, 8.6951274e-10, + 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10, + 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10, + 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09, + 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09, + 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09, + 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09, + 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09, +} +var fe = [256]float32{ + 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933, + 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686, + 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665, + 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967, + 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896, + 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092, + 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386, + 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495, + 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752, + 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325, + 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955, + 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694, + 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218, + 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763, + 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044, + 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796, + 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408, + 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928, + 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393, + 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625, + 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107, + 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878, + 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438, + 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682, + 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852, + 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479, + 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354, + 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494, + 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119, + 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624, + 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574, + 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672, + 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763, + 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816, + 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919, + 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274, + 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195, + 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106, + 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434, + 0.062193416, 0.060783047, 0.059384305, 0.057997175, + 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236, + 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623, + 0.043502413, 0.042254124, 0.041017443, 0.039792392, + 0.038578995, 0.037377283, 0.036187284, 0.035009038, + 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566, + 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421, + 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867, + 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392, + 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414, + 0.008780315, 0.007963077, 0.0071633533, 0.006381906, + 0.0056196423, 
0.0048776558, 0.004157295, 0.0034602648, + 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693, + 0.00045413437, +} diff --git a/vendor/golang.org/x/exp/rand/normal.go b/vendor/golang.org/x/exp/rand/normal.go new file mode 100644 index 00000000..b66da3a8 --- /dev/null +++ b/vendor/golang.org/x/exp/rand/normal.go @@ -0,0 +1,156 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Normal distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + rn = 3.442619855899 +) + +func absInt32(i int32) uint32 { + if i < 0 { + return uint32(-i) + } + return uint32(i) +} + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1). +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func (r *Rand) NormFloat64() float64 { + for { + j := int32(r.Uint32()) // Possibly negative + i := j & 0x7F + x := float64(j) * float64(wn[i]) + if absInt32(j) < kn[i] { + // This case should be hit better than 99% of the time. + return x + } + + if i == 0 { + // This extra work is only required for the base strip. + for { + x = -math.Log(r.Float64()) * (1.0 / rn) + y := -math.Log(r.Float64()) + if y+y >= x*x { + break + } + } + if j > 0 { + return rn + x + } + return -rn - x + } + if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) { + return x + } + } +} + +var kn = [128]uint32{ + 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2, + 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d, + 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7, + 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883, + 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30, + 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa, + 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d, + 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18, + 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924, + 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a, + 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4, + 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62, + 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e, + 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473, + 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd, + 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568, + 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08, + 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc, + 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94, + 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb, + 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075, + 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba, + 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded, + 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72, + 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a, + 0x7ba90bdc, 0x7a722176, 0x77d664e5, +} +var wn = [128]float32{ + 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10, + 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10, + 2.9073963e-10, 3.042997e-10, 
3.1699796e-10, 3.289802e-10, + 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10, + 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10, + 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10, + 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10, + 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10, + 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10, + 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10, + 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10, + 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10, + 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10, + 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10, + 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10, + 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10, + 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10, + 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10, + 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10, + 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10, + 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10, + 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10, + 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10, + 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10, + 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10, + 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09, + 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09, + 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09, + 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09, + 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09, + 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09, + 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09, +} +var fn = [128]float32{ + 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303, + 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177, + 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569, + 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277, + 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434, + 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903, + 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055, + 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766, + 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872, + 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642, + 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446, + 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685, + 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484, + 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807, + 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239, + 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877, + 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499, + 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037, + 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265, + 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602, + 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467, + 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058, + 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863, + 0.040742867, 0.03688439, 0.033087887, 0.029356318, + 0.025693292, 0.022103304, 0.018592102, 0.015167298, + 0.011839478, 0.008624485, 0.005548995, 0.0026696292, +} diff --git a/vendor/golang.org/x/exp/rand/rand.go b/vendor/golang.org/x/exp/rand/rand.go new file mode 100644 index 00000000..ee6161bc --- /dev/null +++ b/vendor/golang.org/x/exp/rand/rand.go @@ -0,0 +1,372 @@ +// 
Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rand implements pseudo-random number generators. +// +// Random numbers are generated by a Source. Top-level functions, such as +// Float64 and Int, use a default shared Source that produces a deterministic +// sequence of values each time a program is run. Use the Seed function to +// initialize the default Source if different behavior is required for each run. +// The default Source, a LockedSource, is safe for concurrent use by multiple +// goroutines, but Sources created by NewSource are not. However, Sources are small +// and it is reasonable to have a separate Source for each goroutine, seeded +// differently, to avoid locking. +// +// For random numbers suitable for security-sensitive work, see the crypto/rand +// package. +package rand + +import "sync" + +// A Source represents a source of uniformly-distributed +// pseudo-random int64 values in the range [0, 1<<64). +type Source interface { + Uint64() uint64 + Seed(seed uint64) +} + +// NewSource returns a new pseudo-random Source seeded with the given value. +func NewSource(seed uint64) Source { + var rng PCGSource + rng.Seed(seed) + return &rng +} + +// A Rand is a source of random numbers. +type Rand struct { + src Source + + // readVal contains remainder of 64-bit integer used for bytes + // generation during most recent Read call. + // It is saved so next Read call can start where the previous + // one finished. + readVal uint64 + // readPos indicates the number of low-order bytes of readVal + // that are still valid. + readPos int8 +} + +// New returns a new Rand that uses random values from src +// to generate other random values. +func New(src Source) *Rand { + return &Rand{src: src} +} + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +// Seed should not be called concurrently with any other Rand method. +func (r *Rand) Seed(seed uint64) { + if lk, ok := r.src.(*LockedSource); ok { + lk.seedPos(seed, &r.readPos) + return + } + + r.src.Seed(seed) + r.readPos = 0 +} + +// Uint64 returns a pseudo-random 64-bit integer as a uint64. +func (r *Rand) Uint64() uint64 { return r.src.Uint64() } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) } + +// Uint32 returns a pseudo-random 32-bit value as a uint32. +func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32. +func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) } + +// Int returns a non-negative pseudo-random int. +func (r *Rand) Int() int { + u := uint(r.Uint64()) + return int(u << 1 >> 1) // clear sign bit. +} + +const maxUint64 = (1 << 64) - 1 + +// Uint64n returns, as a uint64, a pseudo-random number in [0,n). +// It is guaranteed more uniform than taking a Source value mod n +// for any n that is not a power of 2. +func (r *Rand) Uint64n(n uint64) uint64 { + if n&(n-1) == 0 { // n is power of two, can mask + if n == 0 { + panic("invalid argument to Uint64n") + } + return r.Uint64() & (n - 1) + } + // If n does not divide v, to avoid bias we must not use + // a v that is within maxUint64%n of the top of the range. + v := r.Uint64() + if v > maxUint64-n { // Fast check. 
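+ // ceiling below is the largest multiple of n representable in a
+ // uint64, so accepted values lie in [0, ceiling) and v % n stays
+ // uniform; e.g. for n == 10 the top six values of the range are
+ // rejected and redrawn.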
+ ceiling := maxUint64 - maxUint64%n + for v >= ceiling { + v = r.Uint64() + } + } + + return v % n +} + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Int63n(n int64) int64 { + if n <= 0 { + panic("invalid argument to Int63n") + } + return int64(r.Uint64n(uint64(n))) +} + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Int31n(n int32) int32 { + if n <= 0 { + panic("invalid argument to Int31n") + } + // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. + return int32(r.Uint64n(uint64(n))) +} + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + if n <= 0 { + panic("invalid argument to Intn") + } + // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. + return int(r.Uint64n(uint64(n))) +} + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0). +func (r *Rand) Float64() float64 { + // There is one bug in the value stream: r.Int63() may be so close + // to 1<<63 that the division rounds up to 1.0, and we've guaranteed + // that the result is always less than 1.0. + // + // We tried to fix this by mapping 1.0 back to 0.0, but since float64 + // values near 0 are much denser than near 1, mapping 1 to 0 caused + // a theoretically significant overshoot in the probability of returning 0. + // Instead of that, if we round up to 1, just try again. + // Getting 1 only happens 1/2⁵³ of the time, so most clients + // will not observe it anyway. +again: + f := float64(r.Uint64n(1<<53)) / (1 << 53) + if f == 1.0 { + goto again // resample; this branch is taken O(never) + } + return f +} + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0). +func (r *Rand) Float32() float32 { + // We do not want to return 1.0. + // This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64). +again: + f := float32(r.Float64()) + if f == 1 { + goto again // resample; this branch is taken O(very rarely) + } + return f +} + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n). +func (r *Rand) Perm(n int) []int { + m := make([]int, n) + // In the following loop, the iteration when i=0 always swaps m[0] with m[0]. + // A change to remove this useless iteration is to assign 1 to i in the init + // statement. But Perm also affects r. Making this change will affect + // the final state of r. So this change can't be made for compatibility + // reasons for Go 1. + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + m[i] = m[j] + m[j] = i + } + return m +} + +// Shuffle pseudo-randomizes the order of elements. +// n is the number of elements. Shuffle panics if n < 0. +// swap swaps the elements with indexes i and j. +func (r *Rand) Shuffle(n int, swap func(i, j int)) { + if n < 0 { + panic("invalid argument to Shuffle") + } + + // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle + // Shuffle really ought not be called with n that doesn't fit in 32 bits. + // Not only will it take a very long time, but with 2³¹! possible permutations, + // there's no way that any PRNG can have a big enough internal state to + // generate even a minuscule percentage of the possible permutations. + // Nevertheless, the right API signature accepts an int n, so handle it as best we can.
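+ // Walk down from the top: use Int63n while the index needs more than + // 31 bits, then switch to the cheaper Int31n for the rest of the walk.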
+ i := n - 1 + for ; i > 1<<31-1-1; i-- { + j := int(r.Int63n(int64(i + 1))) + swap(i, j) + } + for ; i > 0; i-- { + j := int(r.Int31n(int32(i + 1))) + swap(i, j) + } +} + +// Read generates len(p) random bytes and writes them into p. It +// always returns len(p) and a nil error. +// Read should not be called concurrently with any other Rand method unless +// the underlying source is a LockedSource. +func (r *Rand) Read(p []byte) (n int, err error) { + if lk, ok := r.src.(*LockedSource); ok { + return lk.Read(p, &r.readVal, &r.readPos) + } + return read(p, r.src, &r.readVal, &r.readPos) +} + +func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) { + pos := *readPos + val := *readVal + rng, _ := src.(*PCGSource) + for n = 0; n < len(p); n++ { + if pos == 0 { + if rng != nil { + val = rng.Uint64() + } else { + val = src.Uint64() + } + pos = 8 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + *readPos = pos + *readVal = val + return +} + +/* + * Top-level convenience functions + */ + +var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)}) + +// Type assert that globalRand's source is a LockedSource whose src is a PCGSource. +var _ PCGSource = globalRand.src.(*LockedSource).src + +// Seed uses the provided seed value to initialize the default Source to a +// deterministic state. If Seed is not called, the generator behaves as +// if seeded by Seed(1). +// Seed, unlike the Rand.Seed method, is safe for concurrent use. +func Seed(seed uint64) { globalRand.Seed(seed) } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64 +// from the default Source. +func Int63() int64 { return globalRand.Int63() } + +// Uint32 returns a pseudo-random 32-bit value as a uint32 +// from the default Source. +func Uint32() uint32 { return globalRand.Uint32() } + +// Uint64 returns a pseudo-random 64-bit value as a uint64 +// from the default Source. +func Uint64() uint64 { return globalRand.Uint64() } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32 +// from the default Source. +func Int31() int32 { return globalRand.Int31() } + +// Int returns a non-negative pseudo-random int from the default Source. +func Int() int { return globalRand.Int() } + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int63n(n int64) int64 { return globalRand.Int63n(n) } + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int31n(n int32) int32 { return globalRand.Int31n(n) } + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Intn(n int) int { return globalRand.Intn(n) } + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float64() float64 { return globalRand.Float64() } + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float32() float32 { return globalRand.Float32() } + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { return globalRand.Perm(n) } + +// Shuffle pseudo-randomizes the order of elements using the default Source. +// n is the number of elements. Shuffle panics if n < 0. +// swap swaps the elements with indexes i and j. 
+func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) } + +// Read generates len(p) random bytes from the default Source and +// writes them into p. It always returns len(p) and a nil error. +// Read, unlike the Rand.Read method, is safe for concurrent use. +func Read(p []byte) (n int, err error) { return globalRand.Read(p) } + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1) +// from the default Source. +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func NormFloat64() float64 { return globalRand.NormFloat64() } + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source. +// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func ExpFloat64() float64 { return globalRand.ExpFloat64() } + +// LockedSource is an implementation of Source that is concurrency-safe. +// A Rand using a LockedSource is safe for concurrent use. +// +// The zero value of LockedSource is valid, but should be seeded before use. +type LockedSource struct { + lk sync.Mutex + src PCGSource +} + +func (s *LockedSource) Uint64() (n uint64) { + s.lk.Lock() + n = s.src.Uint64() + s.lk.Unlock() + return +} + +func (s *LockedSource) Seed(seed uint64) { + s.lk.Lock() + s.src.Seed(seed) + s.lk.Unlock() +} + +// seedPos implements Seed for a LockedSource without a race condition. +func (s *LockedSource) seedPos(seed uint64, readPos *int8) { + s.lk.Lock() + s.src.Seed(seed) + *readPos = 0 + s.lk.Unlock() +} + +// Read implements Read for a LockedSource. +func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) { + s.lk.Lock() + n, err = read(p, &s.src, readVal, readPos) + s.lk.Unlock() + return +} diff --git a/vendor/golang.org/x/exp/rand/rng.go b/vendor/golang.org/x/exp/rand/rng.go new file mode 100644 index 00000000..9b79108c --- /dev/null +++ b/vendor/golang.org/x/exp/rand/rng.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "encoding/binary" + "io" + "math/bits" +) + +// PCGSource is an implementation of a 64-bit permuted congruential +// generator as defined in +// +// PCG: A Family of Simple Fast Space-Efficient Statistically Good +// Algorithms for Random Number Generation +// Melissa E. O’Neill, Harvey Mudd College +// http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf +// +// The generator here is the congruential generator PCG XSL RR 128/64 (LCG) +// as found in the software available at http://www.pcg-random.org/. +// It has period 2^128 with 128 bits of state, producing 64-bit values. +// Its state is represented by two uint64 words. +type PCGSource struct { + low uint64 + high uint64 +} + +const ( + maxUint32 = (1 << 32) - 1 + + multiplier = 47026247687942121848144207491837523525 + mulHigh = multiplier >> 64 + mulLow = multiplier & maxUint64 + + increment = 117397592171526113268558934119004209487 + incHigh = increment >> 64 + incLow = increment & maxUint64 + + // TODO: Use these?
+ initializer = 245720598905631564143578724636268694099 + initHigh = initializer >> 64 + initLow = initializer & maxUint64 +) + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +func (pcg *PCGSource) Seed(seed uint64) { + pcg.low = seed + pcg.high = seed // TODO: What is right? +} + +// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64. +func (pcg *PCGSource) Uint64() uint64 { + pcg.multiply() + pcg.add() + // XOR high and low 64 bits together and rotate right by high 6 bits of state. + return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58)) +} + +func (pcg *PCGSource) add() { + var carry uint64 + pcg.low, carry = bits.Add64(pcg.low, incLow, 0) + pcg.high, _ = bits.Add64(pcg.high, incHigh, carry) +} + +func (pcg *PCGSource) multiply() { + hi, lo := bits.Mul64(pcg.low, mulLow) + hi += pcg.high * mulLow + hi += pcg.low * mulHigh + pcg.low = lo + pcg.high = hi +} + +// MarshalBinary returns the binary representation of the current state of the generator. +func (pcg *PCGSource) MarshalBinary() ([]byte, error) { + var buf [16]byte + binary.BigEndian.PutUint64(buf[:8], pcg.high) + binary.BigEndian.PutUint64(buf[8:], pcg.low) + return buf[:], nil +} + +// UnmarshalBinary sets the state of the generator to the state represented in data. +func (pcg *PCGSource) UnmarshalBinary(data []byte) error { + if len(data) < 16 { + return io.ErrUnexpectedEOF + } + pcg.low = binary.BigEndian.Uint64(data[8:]) + pcg.high = binary.BigEndian.Uint64(data[:8]) + return nil +} diff --git a/vendor/golang.org/x/exp/rand/zipf.go b/vendor/golang.org/x/exp/rand/zipf.go new file mode 100644 index 00000000..f04c814e --- /dev/null +++ b/vendor/golang.org/x/exp/rand/zipf.go @@ -0,0 +1,77 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// W.Hormann, G.Derflinger: +// "Rejection-Inversion to Generate Variates +// from Monotone Discrete Distributions" +// http://eeyore.wu-wien.ac.at/papers/96-04-04.wh-der.ps.gz + +package rand + +import "math" + +// A Zipf generates Zipf distributed variates. +type Zipf struct { + r *Rand + imax float64 + v float64 + q float64 + s float64 + oneminusQ float64 + oneminusQinv float64 + hxm float64 + hx0minusHxm float64 +} + +func (z *Zipf) h(x float64) float64 { + return math.Exp(z.oneminusQ*math.Log(z.v+x)) * z.oneminusQinv +} + +func (z *Zipf) hinv(x float64) float64 { + return math.Exp(z.oneminusQinv*math.Log(z.oneminusQ*x)) - z.v +} + +// NewZipf returns a Zipf variate generator. +// The generator generates values k ∈ [0, imax] +// such that P(k) is proportional to (v + k) ** (-s). +// Requirements: s > 1 and v >= 1. +func NewZipf(r *Rand, s float64, v float64, imax uint64) *Zipf { + z := new(Zipf) + if s <= 1.0 || v < 1 { + return nil + } + z.r = r + z.imax = float64(imax) + z.v = v + z.q = s + z.oneminusQ = 1.0 - z.q + z.oneminusQinv = 1.0 / z.oneminusQ + z.hxm = z.h(z.imax + 0.5) + z.hx0minusHxm = z.h(0.5) - math.Exp(math.Log(z.v)*(-z.q)) - z.hxm + z.s = 1 - z.hinv(z.h(1.5)-math.Exp(-z.q*math.Log(z.v+1.0))) + return z +} + +// Uint64 returns a value drawn from the Zipf distribution described +// by the Zipf object. 
+func (z *Zipf) Uint64() uint64 { + if z == nil { + panic("rand: nil Zipf") + } + k := 0.0 + + for { + r := z.r.Float64() // r on [0,1] + ur := z.hxm + r*z.hx0minusHxm + x := z.hinv(ur) + k = math.Floor(x + 0.5) + if k-x <= z.s { + break + } + if ur >= z.h(k+0.5)-math.Exp(-math.Log(k+z.v)*z.q) { + break + } + } + return uint64(k) +} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
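For reviewers unfamiliar with the newly vendored package: the golang.org/x/exp/rand API above mirrors math/rand on top of the 128-bit PCG source. A minimal usage sketch follows; the seed and the Zipf parameters are illustrative values chosen for this note, not values used anywhere in this patch.

package main

import (
	"fmt"

	"golang.org/x/exp/rand"
)

func main() {
	// A PCG-backed source. Unlike the package-level functions, which share
	// a LockedSource, a Rand built this way is not safe for concurrent use.
	r := rand.New(rand.NewSource(42)) // seed 42 is an arbitrary example

	fmt.Println(r.Uint64n(100)) // uniform over [0,100), with modulo bias removed
	fmt.Println(r.Float64())    // uniform over [0.0,1.0)

	// Zipf variates k in [0,imax] with P(k) proportional to (v+k)^(-s).
	// NewZipf requires s > 1 and v >= 1, returning nil otherwise.
	z := rand.NewZipf(r, 1.5, 1, 1000)
	fmt.Println(z.Uint64())
}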
diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go new file mode 100644 index 00000000..150f887e --- /dev/null +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be +// compiled the first time it is needed. +type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go new file mode 100644 index 00000000..2a0123d4 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -0,0 +1,184 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Module file printer. + +package modfile + +import ( + "bytes" + "fmt" + "strings" +) + +// Format returns a go.mod file as a byte slice, formatted in standard style. +func Format(f *FileSyntax) []byte { + pr := &printer{} + pr.file(f) + + // remove trailing blank lines + b := pr.Bytes() + for len(b) > 0 && b[len(b)-1] == '\n' && (len(b) == 1 || b[len(b)-2] == '\n') { + b = b[:len(b)-1] + } + return b +} + +// A printer collects the state during printing of a file or expression. +type printer struct { + bytes.Buffer // output buffer + comment []Comment // pending end-of-line comments + margin int // left margin (indent), a number of tabs +} + +// printf prints to the buffer. +func (p *printer) printf(format string, args ...interface{}) { + fmt.Fprintf(p, format, args...) +} + +// indent returns the position on the current line, in bytes, 0-indexed. 
+func (p *printer) indent() int { + b := p.Bytes() + n := 0 + for n < len(b) && b[len(b)-1-n] != '\n' { + n++ + } + return n +} + +// newline ends the current line, flushing end-of-line comments. +func (p *printer) newline() { + if len(p.comment) > 0 { + p.printf(" ") + for i, com := range p.comment { + if i > 0 { + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + } + p.printf("%s", strings.TrimSpace(com.Token)) + } + p.comment = p.comment[:0] + } + + p.trim() + if b := p.Bytes(); len(b) == 0 || (len(b) >= 2 && b[len(b)-1] == '\n' && b[len(b)-2] == '\n') { + // skip the blank line at top of file or after a blank line + } else { + p.printf("\n") + } + for i := 0; i < p.margin; i++ { + p.printf("\t") + } +} + +// trim removes trailing spaces and tabs from the current line. +func (p *printer) trim() { + // Remove trailing spaces and tabs from line we're about to end. + b := p.Bytes() + n := len(b) + for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { + n-- + } + p.Truncate(n) +} + +// file formats the given file into the print buffer. +func (p *printer) file(f *FileSyntax) { + for _, com := range f.Before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + for i, stmt := range f.Stmt { + switch x := stmt.(type) { + case *CommentBlock: + // comments already handled + p.expr(x) + + default: + p.expr(x) + p.newline() + } + + for _, com := range stmt.Comment().After { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + if i+1 < len(f.Stmt) { + p.newline() + } + } +} + +func (p *printer) expr(x Expr) { + // Emit line-comments preceding this expression. + if before := x.Comment().Before; len(before) > 0 { + // Want to print a line comment. + // Line comments must be at the current margin. + p.trim() + if p.indent() > 0 { + // There's other text on the line. Start a new line. + p.printf("\n") + } + // Re-indent to margin. + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + for _, com := range before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + } + + switch x := x.(type) { + default: + panic(fmt.Errorf("printer: unexpected type %T", x)) + + case *CommentBlock: + // done + + case *LParen: + p.printf("(") + case *RParen: + p.printf(")") + + case *Line: + p.tokens(x.Token) + + case *LineBlock: + p.tokens(x.Token) + p.printf(" ") + p.expr(&x.LParen) + p.margin++ + for _, l := range x.Line { + p.newline() + p.expr(l) + } + p.margin-- + p.newline() + p.expr(&x.RParen) + } + + // Queue end-of-line comments for printing when we + // reach the end of the line. + p.comment = append(p.comment, x.Comment().Suffix...) +} + +func (p *printer) tokens(tokens []string) { + sep := "" + for _, t := range tokens { + if t == "," || t == ")" || t == "]" || t == "}" { + sep = "" + } + p.printf("%s%s", sep, t) + sep = " " + if t == "(" || t == "[" || t == "{" { + sep = "" + } + } +} diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go new file mode 100644 index 00000000..22056825 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -0,0 +1,958 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "bytes" + "errors" + "fmt" + "os" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Position describes an arbitrary source position in a file, including the +// file, line, column, and byte offset. 
+type Position struct { + Line int // line in input (starting at 1) + LineRune int // rune in line (starting at 1) + Byte int // byte in input (starting at 0) +} + +// add returns the position at the end of s, assuming it starts at p. +func (p Position) add(s string) Position { + p.Byte += len(s) + if n := strings.Count(s, "\n"); n > 0 { + p.Line += n + s = s[strings.LastIndex(s, "\n")+1:] + p.LineRune = 1 + } + p.LineRune += utf8.RuneCountInString(s) + return p +} + +// An Expr represents an input element. +type Expr interface { + // Span returns the start and end position of the expression, + // excluding leading or trailing comments. + Span() (start, end Position) + + // Comment returns the comments attached to the expression. + // This method would normally be named 'Comments' but that + // would interfere with embedding a type of the same name. + Comment() *Comments +} + +// A Comment represents a single // comment. +type Comment struct { + Start Position + Token string // without trailing newline + Suffix bool // an end of line (not whole line) comment +} + +// Comments collects the comments associated with an expression. +type Comments struct { + Before []Comment // whole-line comments before this expression + Suffix []Comment // end-of-line comments after this expression + + // For top-level expressions only, After lists whole-line + // comments following the expression. + After []Comment +} + +// Comment returns the receiver. This isn't useful by itself, but +// a [Comments] struct is embedded into all the expression +// implementation types, and this gives each of those a Comment +// method to satisfy the Expr interface. +func (c *Comments) Comment() *Comments { + return c +} + +// A FileSyntax represents an entire go.mod file. +type FileSyntax struct { + Name string // file path + Comments + Stmt []Expr +} + +func (x *FileSyntax) Span() (start, end Position) { + if len(x.Stmt) == 0 { + return + } + start, _ = x.Stmt[0].Span() + _, end = x.Stmt[len(x.Stmt)-1].Span() + return start, end +} + +// addLine adds a line containing the given tokens to the file. +// +// If the first token of the hint matches the first token of the +// line, the new line is added at the end of the block containing hint, +// extracting hint into a new block if it is not yet in one. +// +// If the hint is non-nil but its first token does not match, +// the new line is added after the block containing hint +// (or hint itself, if not in a block). +// +// If no hint is provided, addLine appends the line to the end of +// the last block with a matching first token, +// or to the end of the file if no such block exists. +func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { + if hint == nil { + // If no hint given, add to the last statement of the given type.
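+ // Walk the statements in reverse so the new line attaches to the last + // block or line whose first token matches tokens[0].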
+ Loop: + for i := len(x.Stmt) - 1; i >= 0; i-- { + stmt := x.Stmt[i] + switch stmt := stmt.(type) { + case *Line: + if stmt.Token != nil && stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + case *LineBlock: + if stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + } + } + } + + newLineAfter := func(i int) *Line { + new := &Line{Token: tokens} + if i == len(x.Stmt) { + x.Stmt = append(x.Stmt, new) + } else { + x.Stmt = append(x.Stmt, nil) + copy(x.Stmt[i+2:], x.Stmt[i+1:]) + x.Stmt[i+1] = new + } + return new + } + + if hint != nil { + for i, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt == hint { + if stmt.Token == nil || stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + // Convert line to line block. + stmt.InBlock = true + block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} + stmt.Token = stmt.Token[1:] + x.Stmt[i] = block + new := &Line{Token: tokens[1:], InBlock: true} + block.Line = append(block.Line, new) + return new + } + + case *LineBlock: + if stmt == hint { + if stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line = append(stmt.Line, new) + return new + } + + for j, line := range stmt.Line { + if line == hint { + if stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + // Add new line after hint within the block. + stmt.Line = append(stmt.Line, nil) + copy(stmt.Line[j+2:], stmt.Line[j+1:]) + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line[j+1] = new + return new + } + } + } + } + } + + new := &Line{Token: tokens} + x.Stmt = append(x.Stmt, new) + return new +} + +func (x *FileSyntax) updateLine(line *Line, tokens ...string) { + if line.InBlock { + tokens = tokens[1:] + } + line.Token = tokens +} + +// markRemoved modifies line so that it (and its end-of-line comment, if any) +// will be dropped by (*FileSyntax).Cleanup. +func (line *Line) markRemoved() { + line.Token = nil + line.Comments.Suffix = nil +} + +// Cleanup cleans up the file syntax x after any edit operations. +// To avoid quadratic behavior, (*Line).markRemoved marks the line as dead +// by setting line.Token = nil but does not remove it from the slice +// in which it appears. After edits have all been indicated, +// calling Cleanup cleans out the dead lines. +func (x *FileSyntax) Cleanup() { + w := 0 + for _, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt.Token == nil { + continue + } + case *LineBlock: + ww := 0 + for _, line := range stmt.Line { + if line.Token != nil { + stmt.Line[ww] = line + ww++ + } + } + if ww == 0 { + continue + } + if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { + // Collapse block into single line. + line := &Line{ + Comments: Comments{ + Before: commentsAdd(stmt.Before, stmt.Line[0].Before), + Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), + After: commentsAdd(stmt.Line[0].After, stmt.After), + }, + Token: stringsAdd(stmt.Token, stmt.Line[0].Token), + } + x.Stmt[w] = line + w++ + continue + } + stmt.Line = stmt.Line[:ww] + } + x.Stmt[w] = stmt + w++ + } + x.Stmt = x.Stmt[:w] +} + +func commentsAdd(x, y []Comment) []Comment { + return append(x[:len(x):len(x)], y...) +} + +func stringsAdd(x, y []string) []string { + return append(x[:len(x):len(x)], y...) +} + +// A CommentBlock represents a top-level block of comments separate +// from any rule. 
+type CommentBlock struct { + Comments + Start Position +} + +func (x *CommentBlock) Span() (start, end Position) { + return x.Start, x.Start +} + +// A Line is a single line of tokens. +type Line struct { + Comments + Start Position + Token []string + InBlock bool + End Position +} + +func (x *Line) Span() (start, end Position) { + return x.Start, x.End +} + +// A LineBlock is a factored block of lines, like +// +// require ( +// "x" +// "y" +// ) +type LineBlock struct { + Comments + Start Position + LParen LParen + Token []string + Line []*Line + RParen RParen +} + +func (x *LineBlock) Span() (start, end Position) { + return x.Start, x.RParen.Pos.add(")") +} + +// An LParen represents the beginning of a parenthesized line block. +// It is a place to store suffix comments. +type LParen struct { + Comments + Pos Position +} + +func (x *LParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An RParen represents the end of a parenthesized line block. +// It is a place to store whole-line (before) comments. +type RParen struct { + Comments + Pos Position +} + +func (x *RParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An input represents a single input file being parsed. +type input struct { + // Lexing state. + filename string // name of input file, for errors + complete []byte // entire input + remaining []byte // remaining input + tokenStart []byte // token being scanned to end of input + token token // next token to be returned by lex, peek + pos Position // current input position + comments []Comment // accumulated comments + + // Parser state. + file *FileSyntax // returned top-level syntax tree + parseErrors ErrorList // errors encountered during parsing + + // Comment assignment state. + pre []Expr // all expressions, in preorder traversal + post []Expr // all expressions, in postorder traversal +} + +func newInput(filename string, data []byte) *input { + return &input{ + filename: filename, + complete: data, + remaining: data, + pos: Position{Line: 1, LineRune: 1, Byte: 0}, + } +} + +// parse parses the input file. +func parse(file string, data []byte) (f *FileSyntax, err error) { + // The parser panics for both routine errors like syntax errors + // and for programmer bugs like array index errors. + // Turn both into error returns. Catching bug panics is + // especially important when processing many files. + in := newInput(file, data) + defer func() { + if e := recover(); e != nil && e != &in.parseErrors { + in.parseErrors = append(in.parseErrors, Error{ + Filename: in.filename, + Pos: in.pos, + Err: fmt.Errorf("internal error: %v", e), + }) + } + if err == nil && len(in.parseErrors) > 0 { + err = in.parseErrors + } + }() + + // Prime the lexer by reading in the first token. It will be available + // in the next peek() or lex() call. + in.readToken() + + // Invoke the parser. + in.parseFile() + if len(in.parseErrors) > 0 { + return nil, in.parseErrors + } + in.file.Name = in.filename + + // Assign comments to nearby syntax. + in.assignComments() + + return in.file, nil +} + +// Error is called to report an error. +// Error does not return: it panics. +func (in *input) Error(s string) { + in.parseErrors = append(in.parseErrors, Error{ + Filename: in.filename, + Pos: in.pos, + Err: errors.New(s), + }) + panic(&in.parseErrors) +} + +// eof reports whether the input has reached end of file. +func (in *input) eof() bool { + return len(in.remaining) == 0 +} + +// peekRune returns the next rune in the input without consuming it. 
+func (in *input) peekRune() int { + if len(in.remaining) == 0 { + return 0 + } + r, _ := utf8.DecodeRune(in.remaining) + return int(r) +} + +// peekPrefix reports whether the remaining input begins with the given prefix. +func (in *input) peekPrefix(prefix string) bool { + // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) + // but without the allocation of the []byte copy of prefix. + for i := 0; i < len(prefix); i++ { + if i >= len(in.remaining) || in.remaining[i] != prefix[i] { + return false + } + } + return true +} + +// readRune consumes and returns the next rune in the input. +func (in *input) readRune() int { + if len(in.remaining) == 0 { + in.Error("internal lexer error: readRune at EOF") + } + r, size := utf8.DecodeRune(in.remaining) + in.remaining = in.remaining[size:] + if r == '\n' { + in.pos.Line++ + in.pos.LineRune = 1 + } else { + in.pos.LineRune++ + } + in.pos.Byte += size + return int(r) +} + +type token struct { + kind tokenKind + pos Position + endPos Position + text string +} + +type tokenKind int + +const ( + _EOF tokenKind = -(iota + 1) + _EOLCOMMENT + _IDENT + _STRING + _COMMENT + + // newlines and punctuation tokens are allowed as ASCII codes. +) + +func (k tokenKind) isComment() bool { + return k == _COMMENT || k == _EOLCOMMENT +} + +// isEOL returns whether a token terminates a line. +func (k tokenKind) isEOL() bool { + return k == _EOF || k == _EOLCOMMENT || k == '\n' +} + +// startToken marks the beginning of the next input token. +// It must be followed by a call to endToken, once the token's text has +// been consumed using readRune. +func (in *input) startToken() { + in.tokenStart = in.remaining + in.token.text = "" + in.token.pos = in.pos +} + +// endToken marks the end of an input token. +// It records the actual token string in tok.text. +// A single trailing newline (LF or CRLF) will be removed from comment tokens. +func (in *input) endToken(kind tokenKind) { + in.token.kind = kind + text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)]) + if kind.isComment() { + if strings.HasSuffix(text, "\r\n") { + text = text[:len(text)-2] + } else { + text = strings.TrimSuffix(text, "\n") + } + } + in.token.text = text + in.token.endPos = in.pos +} + +// peek returns the kind of the next token returned by lex. +func (in *input) peek() tokenKind { + return in.token.kind +} + +// lex is called from the parser to obtain the next input token. +func (in *input) lex() token { + tok := in.token + in.readToken() + return tok +} + +// readToken lexes the next token from the text and stores it in in.token. +func (in *input) readToken() { + // Skip past spaces, stopping at non-space or EOF. + for !in.eof() { + c := in.peekRune() + if c == ' ' || c == '\t' || c == '\r' { + in.readRune() + continue + } + + // Comment runs to end of line. + if in.peekPrefix("//") { + in.startToken() + + // Is this comment the only thing on its line? + // Find the last \n before this // and see if it's all + // spaces from there to here. + i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) + suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 + in.readRune() + in.readRune() + + // Consume comment. + for len(in.remaining) > 0 && in.readRune() != '\n' { + } + + // If we are at top level (not in a statement), hand the comment to + // the parser as a _COMMENT token. The grammar is written + // to handle top-level comments itself. + if !suffix { + in.endToken(_COMMENT) + return + } + + // Otherwise, save comment for later attachment to syntax tree. 
+ in.endToken(_EOLCOMMENT) + in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix}) + return + } + + if in.peekPrefix("/*") { + in.Error("mod files must use // comments (not /* */ comments)") + } + + // Found non-space non-comment. + break + } + + // Found the beginning of the next token. + in.startToken() + + // End of file. + if in.eof() { + in.endToken(_EOF) + return + } + + // Punctuation tokens. + switch c := in.peekRune(); c { + case '\n', '(', ')', '[', ']', '{', '}', ',': + in.readRune() + in.endToken(tokenKind(c)) + return + + case '"', '`': // quoted string + quote := c + in.readRune() + for { + if in.eof() { + in.pos = in.token.pos + in.Error("unexpected EOF in string") + } + if in.peekRune() == '\n' { + in.Error("unexpected newline in string") + } + c := in.readRune() + if c == quote { + break + } + if c == '\\' && quote != '`' { + if in.eof() { + in.pos = in.token.pos + in.Error("unexpected EOF in string") + } + in.readRune() + } + } + in.endToken(_STRING) + return + } + + // Checked all punctuation. Must be identifier token. + if c := in.peekRune(); !isIdent(c) { + in.Error(fmt.Sprintf("unexpected input character %#q", c)) + } + + // Scan over identifier. + for isIdent(in.peekRune()) { + if in.peekPrefix("//") { + break + } + if in.peekPrefix("/*") { + in.Error("mod files must use // comments (not /* */ comments)") + } + in.readRune() + } + in.endToken(_IDENT) +} + +// isIdent reports whether c is an identifier rune. +// We treat most printable runes as identifier runes, except for a handful of +// ASCII punctuation characters. +func isIdent(c int) bool { + switch r := rune(c); r { + case ' ', '(', ')', '[', ']', '{', '}', ',': + return false + default: + return !unicode.IsSpace(r) && unicode.IsPrint(r) + } +} + +// Comment assignment. +// We build two lists of all subexpressions, preorder and postorder. +// The preorder list is ordered by start location, with outer expressions first. +// The postorder list is ordered by end location, with outer expressions last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// order walks the expression adding it and its subexpressions to the +// preorder and postorder lists. +func (in *input) order(x Expr) { + if x != nil { + in.pre = append(in.pre, x) + } + switch x := x.(type) { + default: + panic(fmt.Errorf("order: unexpected type %T", x)) + case nil: + // nothing + case *LParen, *RParen: + // nothing + case *CommentBlock: + // nothing + case *Line: + // nothing + case *FileSyntax: + for _, stmt := range x.Stmt { + in.order(stmt) + } + case *LineBlock: + in.order(&x.LParen) + for _, l := range x.Line { + in.order(l) + } + in.order(&x.RParen) + } + if x != nil { + in.post = append(in.post, x) + } +} + +// assignComments attaches comments to nearby syntax. +func (in *input) assignComments() { + const debug = false + + // Generate preorder and postorder lists. + in.order(in.file) + + // Split into whole-line comments and suffix comments. + var line, suffix []Comment + for _, com := range in.comments { + if com.Suffix { + suffix = append(suffix, com) + } else { + line = append(line, com) + } + } + + if debug { + for _, c := range line { + fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign line comments to syntax immediately following. 
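+ // in.pre is ordered by start position, so each whole-line comment is + // attached to the first expression that starts at or after it.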
+ for _, x := range in.pre { + start, _ := x.Span() + if debug { + fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) + } + xcom := x.Comment() + for len(line) > 0 && start.Byte >= line[0].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) + } + xcom.Before = append(xcom.Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. + in.file.After = append(in.file.After, line...) + + if debug { + for _, c := range suffix { + fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign suffix comments to syntax immediately before. + for i := len(in.post) - 1; i >= 0; i-- { + x := in.post[i] + + start, end := x.Span() + if debug { + fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) + } + + // Do not assign suffix comments to end of line block or whole file. + // Instead assign them to the last element inside. + switch x.(type) { + case *FileSyntax: + continue + } + + // Do not assign suffix comments to something that starts + // on an earlier line, so that in + // + // x ( y + // z ) // comment + // + // we assign the comment to z and not to x ( ... ). + if start.Line != end.Line { + continue + } + xcom := x.Comment() + for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) + } + xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } + + // We assigned suffix comments in reverse. + // If multiple suffix comments were appended to the same + // expression node, they are now in reverse. Fix that. + for _, x := range in.post { + reverseComments(x.Comment().Suffix) + } + + // Remaining suffix comments go at beginning of file. + in.file.Before = append(in.file.Before, suffix...) +} + +// reverseComments reverses the []Comment list. +func reverseComments(list []Comment) { + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +func (in *input) parseFile() { + in.file = new(FileSyntax) + var cb *CommentBlock + for { + switch in.peek() { + case '\n': + in.lex() + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + cb = nil + } + case _COMMENT: + tok := in.lex() + if cb == nil { + cb = &CommentBlock{Start: tok.pos} + } + com := cb.Comment() + com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text}) + case _EOF: + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + } + return + default: + in.parseStmt() + if cb != nil { + in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before + cb = nil + } + } + } +} + +func (in *input) parseStmt() { + tok := in.lex() + start := tok.pos + end := tok.endPos + tokens := []string{tok.text} + for { + tok := in.lex() + switch { + case tok.kind.isEOL(): + in.file.Stmt = append(in.file.Stmt, &Line{ + Start: start, + Token: tokens, + End: end, + }) + return + + case tok.kind == '(': + if next := in.peek(); next.isEOL() { + // Start of block: no more tokens on this line. + in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok)) + return + } else if next == ')' { + rparen := in.lex() + if in.peek().isEOL() { + // Empty block. 
+ in.lex() + in.file.Stmt = append(in.file.Stmt, &LineBlock{ + Start: start, + Token: tokens, + LParen: LParen{Pos: tok.pos}, + RParen: RParen{Pos: rparen.pos}, + }) + return + } + // '( )' in the middle of the line, not a block. + tokens = append(tokens, tok.text, rparen.text) + } else { + // '(' in the middle of the line, not a block. + tokens = append(tokens, tok.text) + } + + default: + tokens = append(tokens, tok.text) + end = tok.endPos + } + } +} + +func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock { + x := &LineBlock{ + Start: start, + Token: token, + LParen: LParen{Pos: lparen.pos}, + } + var comments []Comment + for { + switch in.peek() { + case _EOLCOMMENT: + // Suffix comment, will be attached later by assignComments. + in.lex() + case '\n': + // Blank line. Add an empty comment to preserve it. + in.lex() + if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { + comments = append(comments, Comment{}) + } + case _COMMENT: + tok := in.lex() + comments = append(comments, Comment{Start: tok.pos, Token: tok.text}) + case _EOF: + in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) + case ')': + rparen := in.lex() + x.RParen.Before = comments + x.RParen.Pos = rparen.pos + if !in.peek().isEOL() { + in.Error("syntax error (expected newline after closing paren)") + } + in.lex() + return x + default: + l := in.parseLine() + x.Line = append(x.Line, l) + l.Comment().Before = comments + comments = nil + } + } +} + +func (in *input) parseLine() *Line { + tok := in.lex() + if tok.kind.isEOL() { + in.Error("internal parse error: parseLine at end of line") + } + start := tok.pos + end := tok.endPos + tokens := []string{tok.text} + for { + tok := in.lex() + if tok.kind.isEOL() { + return &Line{ + Start: start, + Token: tokens, + End: end, + InBlock: true, + } + } + tokens = append(tokens, tok.text) + end = tok.endPos + } +} + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// ModulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +func ModulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go new file mode 100644 index 00000000..0e7b7e26 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -0,0 +1,1666 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modfile implements a parser and formatter for go.mod files. +// +// The go.mod syntax is described in +// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file. 
+// +// The [Parse] and [ParseLax] functions both parse a go.mod file and return an +// abstract syntax tree. ParseLax ignores unknown statements and may be used to +// parse go.mod files that may have been developed with newer versions of Go. +// +// The [File] struct returned by Parse and ParseLax represents an abstract +// go.mod file. File has several methods like [File.AddNewRequire] and +// [File.DropReplace] that can be used to programmatically edit a file. +// +// The [Format] function formats a File back to a byte slice which can be +// written to a file. +package modfile + +import ( + "errors" + "fmt" + "path/filepath" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// A File is the parsed, interpreted form of a go.mod file. +type File struct { + Module *Module + Go *Go + Toolchain *Toolchain + Require []*Require + Exclude []*Exclude + Replace []*Replace + Retract []*Retract + + Syntax *FileSyntax +} + +// A Module is the module statement. +type Module struct { + Mod module.Version + Deprecated string + Syntax *Line +} + +// A Go is the go statement. +type Go struct { + Version string // "1.23" + Syntax *Line +} + +// A Toolchain is the toolchain statement. +type Toolchain struct { + Name string // "go1.21rc1" + Syntax *Line +} + +// An Exclude is a single exclude statement. +type Exclude struct { + Mod module.Version + Syntax *Line +} + +// A Replace is a single replace statement. +type Replace struct { + Old module.Version + New module.Version + Syntax *Line +} + +// A Retract is a single retract statement. +type Retract struct { + VersionInterval + Rationale string + Syntax *Line +} + +// A VersionInterval represents a range of versions with upper and lower bounds. +// Intervals are closed: both bounds are included. When Low is equal to High, +// the interval may refer to a single version ('v1.2.3') or an interval +// ('[v1.2.3, v1.2.3]'); both have the same representation. +type VersionInterval struct { + Low, High string +} + +// A Require is a single require statement. +type Require struct { + Mod module.Version + Indirect bool // has "// indirect" comment + Syntax *Line +} + +func (r *Require) markRemoved() { + r.Syntax.markRemoved() + *r = Require{} +} + +func (r *Require) setVersion(v string) { + r.Mod.Version = v + + if line := r.Syntax; len(line.Token) > 0 { + if line.InBlock { + // If the line is preceded by an empty line, remove it; see + // https://golang.org/issue/33779. + if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 { + line.Comments.Before = line.Comments.Before[:0] + } + if len(line.Token) >= 2 { // example.com v1.2.3 + line.Token[1] = v + } + } else { + if len(line.Token) >= 3 { // require example.com v1.2.3 + line.Token[2] = v + } + } + } +} + +// setIndirect sets line to have (or not have) a "// indirect" comment. +func (r *Require) setIndirect(indirect bool) { + r.Indirect = indirect + line := r.Syntax + if isIndirect(line) == indirect { + return + } + if indirect { + // Adding comment. + if len(line.Suffix) == 0 { + // New comment. + line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} + return + } + + com := &line.Suffix[0] + text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash))) + if text == "" { + // Empty comment. + com.Token = "// indirect" + return + } + + // Insert at beginning of existing comment. + com.Token = "// indirect; " + text + return + } + + // Removing comment.
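+ // A bare "// indirect" comment is removed entirely; for a comment like + // "// indirect; reason", only the leading "indirect;" marker is stripped.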
+ f := strings.TrimSpace(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) + if f == "indirect" { + // Remove whole comment. + line.Suffix = nil + return + } + + // Remove comment prefix. + com := &line.Suffix[0] + i := strings.Index(com.Token, "indirect;") + com.Token = "//" + com.Token[i+len("indirect;"):] +} + +// isIndirect reports whether line has a "// indirect" comment, +// meaning it is in go.mod only for its effect on indirect dependencies, +// so that it can be dropped entirely once the effective version of the +// indirect dependency reaches the given minimum version. +func isIndirect(line *Line) bool { + if len(line.Suffix) == 0 { + return false + } + f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) + return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;") +} + +func (f *File) AddModuleStmt(path string) error { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + if f.Module == nil { + f.Module = &Module{ + Mod: module.Version{Path: path}, + Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), + } + } else { + f.Module.Mod.Path = path + f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) + } + return nil +} + +func (f *File) AddComment(text string) { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ + Comments: Comments{ + Before: []Comment{ + { + Token: text, + }, + }, + }, + }) +} + +type VersionFixer func(path, version string) (string, error) + +// dontFixRetract is a VersionFixer that indicates the version should be +// left alone, even if it's not canonical. +var dontFixRetract VersionFixer = func(_, vers string) (string, error) { + return vers, nil +} + +// Parse parses and returns a go.mod file. +// +// file is the name of the file, used in positions and errors. +// +// data is the content of the file. +// +// fix is an optional function that canonicalizes module versions. +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] +// must return the same string). +func Parse(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, true) +} + +// ParseLax is like Parse but ignores unknown statements. +// It is used when parsing go.mod files other than the main module, +// under the theory that most statement types we add in the future will +// only apply in the main module, like exclude and replace, +// and so we get better gradual deployments if old go commands +// simply ignore those statements when found in go.mod files +// in dependencies. +func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, false) +} + +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &File{ + Syntax: fs, + } + var errs ErrorList + + // fix versions in retract directives after the file is parsed. + // We need the module path to fix versions, and it might be at the end.
+ defer func() { + oldLen := len(errs) + f.fixRetract(fix, &errs) + if len(errs) > oldLen { + parsed, err = nil, errs + } + }() + + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict) + + case *LineBlock: + if len(x.Token) > 1 { + if strict { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + } + continue + } + switch x.Token[0] { + default: + if strict { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + } + continue + case "module", "require", "exclude", "replace", "retract": + for _, l := range x.Line { + f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) + } + } + } + } + + if len(errs) > 0 { + return nil, errs + } + return f, nil +} + +var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?([a-z]+[0-9]+)?$`) +var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) + +// Toolchains must be named beginning with `go1`, +// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. +// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+ +var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) + +func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { + // If strict is false, this module is a dependency. + // We ignore all unknown directives as well as main-module-only + // directives like replace and exclude. It will work better for + // forward compatibility if we can depend on modules that have unknown + // statements (presumed relevant only when acting as the main module) + // and simply ignore those statements. 
+ if !strict { + switch verb { + case "go", "module", "retract", "require": + // want these even for dependency go.mods + default: + return + } + } + + wrapModPathError := func(modPath string, err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + ModPath: modPath, + Verb: verb, + Err: err, + }) + } + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + Err: err, + }) + } + errorf := func(format string, args ...interface{}) { + wrapError(fmt.Errorf(format, args...)) + } + + switch verb { + default: + errorf("unknown directive: %s", verb) + + case "go": + if f.Go != nil { + errorf("repeated go statement") + return + } + if len(args) != 1 { + errorf("go directive expects exactly one argument") + return + } else if !GoVersionRE.MatchString(args[0]) { + fixed := false + if !strict { + if m := laxGoVersionRE.FindStringSubmatch(args[0]); m != nil { + args[0] = m[1] + fixed = true + } + } + if !fixed { + errorf("invalid go version '%s': must match format 1.23.0", args[0]) + return + } + } + + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if strict && !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) + return + } + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + + case "module": + if f.Module != nil { + errorf("repeated module statement") + return + } + deprecated := parseDeprecation(block, line) + f.Module = &Module{ + Syntax: line, + Deprecated: deprecated, + } + if len(args) != 1 { + errorf("usage: module module/path") + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Module.Mod = module.Version{Path: s} + + case "require", "exclude": + if len(args) != 2 { + errorf("usage: %s module/path v1.2.3", verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + v, err := parseVersion(verb, s, &args[1], fix) + if err != nil { + wrapError(err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + wrapError(err) + return + } + if err := module.CheckPathMajor(v, pathMajor); err != nil { + wrapModPathError(s, err) + return + } + if verb == "require" { + f.Require = append(f.Require, &Require{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + Indirect: isIndirect(line), + }) + } else { + f.Exclude = append(f.Exclude, &Exclude{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + }) + } + + case "replace": + replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) + if wrappederr != nil { + *errs = append(*errs, *wrappederr) + return + } + f.Replace = append(f.Replace, replace) + + case "retract": + rationale := parseDirectiveComment(block, line) + vi, err := parseVersionInterval(verb, "", &args, dontFixRetract) + if err != nil { + if strict { + wrapError(err) + return + } else { + // Only report errors parsing intervals in the main module. We may + // support additional syntax in the future, such as open and half-open + // intervals. Those can't be supported now, because they break the + // go.mod parser, even in lax mode. 
+ return + } + } + if len(args) > 0 && strict { + // In the future, there may be additional information after the version. + errorf("unexpected token after version: %q", args[0]) + return + } + retract := &Retract{ + VersionInterval: vi, + Rationale: rationale, + Syntax: line, + } + f.Retract = append(f.Retract, retract) + } +} + +func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) { + wrapModPathError := func(modPath string, err error) *Error { + return &Error{ + Filename: filename, + Pos: line.Start, + ModPath: modPath, + Verb: verb, + Err: err, + } + } + wrapError := func(err error) *Error { + return &Error{ + Filename: filename, + Pos: line.Start, + Err: err, + } + } + errorf := func(format string, args ...interface{}) *Error { + return wrapError(fmt.Errorf(format, args...)) + } + + arrow := 2 + if len(args) >= 2 && args[1] == "=>" { + arrow = 1 + } + if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { + return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb) + } + s, err := parseString(&args[0]) + if err != nil { + return nil, errorf("invalid quoted string: %v", err) + } + pathMajor, err := modulePathMajor(s) + if err != nil { + return nil, wrapModPathError(s, err) + + } + var v string + if arrow == 2 { + v, err = parseVersion(verb, s, &args[1], fix) + if err != nil { + return nil, wrapError(err) + } + if err := module.CheckPathMajor(v, pathMajor); err != nil { + return nil, wrapModPathError(s, err) + } + } + ns, err := parseString(&args[arrow+1]) + if err != nil { + return nil, errorf("invalid quoted string: %v", err) + } + nv := "" + if len(args) == arrow+2 { + if !IsDirectoryPath(ns) { + if strings.Contains(ns, "@") { + return nil, errorf("replacement module must match format 'path version', not 'path@version'") + } + return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)") + } + if filepath.Separator == '/' && strings.Contains(ns, `\`) { + return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") + } + } + if len(args) == arrow+3 { + nv, err = parseVersion(verb, ns, &args[arrow+2], fix) + if err != nil { + return nil, wrapError(err) + } + if IsDirectoryPath(ns) { + return nil, errorf("replacement module directory path %q cannot have version", ns) + } + } + return &Replace{ + Old: module.Version{Path: s, Version: v}, + New: module.Version{Path: ns, Version: nv}, + Syntax: line, + }, nil +} + +// fixRetract applies fix to each retract directive in f, appending any errors +// to errs. +// +// Most versions are fixed as we parse the file, but for retract directives, +// the relevant module path is the one specified with the module directive, +// and that might appear at the end of the file (or not at all). 
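+//
+// For example (an illustrative sketch; the path is hypothetical), in a
+// go.mod written as
+//
+//	retract v1.0.0
+//	module example.com/m
+//
+// the retract line is parsed before the module path is known, so its
+// version can only be fixed once the whole file has been read.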
+func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) { + if fix == nil { + return + } + path := "" + if f.Module != nil { + path = f.Module.Mod.Path + } + var r *Retract + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: r.Syntax.Start, + Err: err, + }) + } + + for _, r = range f.Retract { + if path == "" { + wrapError(errors.New("no module directive found, so retract cannot be used")) + return // only print the first one of these + } + + args := r.Syntax.Token + if args[0] == "retract" { + args = args[1:] + } + vi, err := parseVersionInterval("retract", path, &args, fix) + if err != nil { + wrapError(err) + } + r.VersionInterval = vi + } +} + +func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) { + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + Err: err, + }) + } + errorf := func(format string, args ...interface{}) { + wrapError(fmt.Errorf(format, args...)) + } + + switch verb { + default: + errorf("unknown directive: %s", verb) + + case "go": + if f.Go != nil { + errorf("repeated go statement") + return + } + if len(args) != 1 { + errorf("go directive expects exactly one argument") + return + } else if !GoVersionRE.MatchString(args[0]) { + errorf("invalid go version '%s': must match format 1.23.0", args[0]) + return + } + + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) + return + } + + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + + case "use": + if len(args) != 1 { + errorf("usage: %s local/dir", verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Use = append(f.Use, &Use{ + Path: s, + Syntax: line, + }) + + case "replace": + replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) + if wrappederr != nil { + *errs = append(*errs, *wrappederr) + return + } + f.Replace = append(f.Replace, replace) + } +} + +// IsDirectoryPath reports whether the given path should be interpreted as a directory path. +// Just like on the go command line, relative paths starting with a '.' or '..' path component +// and rooted paths are directory paths; the rest are module paths. +func IsDirectoryPath(ns string) bool { + // Because go.mod files can move from one system to another, + // we check all known path syntaxes, both Unix and Windows. + return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || + ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || + strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || + len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' +} + +// MustQuote reports whether s must be quoted in order to appear as +// a single token in a go.mod line. 
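+//
+// Illustrative examples (a sketch, not exhaustive):
+//
+//	MustQuote("golang.org/x/mod") // false: a plain token
+//	MustQuote("a b")              // true: contains a space
+//	MustQuote("x//y")             // true: would start a comment
+//	MustQuote("")                 // true: empty string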
+func MustQuote(s string) bool { + for _, r := range s { + switch r { + case ' ', '"', '\'', '`': + return true + + case '(', ')', '[', ']', '{', '}', ',': + if len(s) > 1 { + return true + } + + default: + if !unicode.IsPrint(r) { + return true + } + } + } + return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") +} + +// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, +// the quotation of s. +func AutoQuote(s string) string { + if MustQuote(s) { + return strconv.Quote(s) + } + return s +} + +func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) { + toks := *args + if len(toks) == 0 || toks[0] == "(" { + return VersionInterval{}, fmt.Errorf("expected '[' or version") + } + if toks[0] != "[" { + v, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + *args = toks[1:] + return VersionInterval{Low: v, High: v}, nil + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after '['") + } + low, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "," { + return VersionInterval{}, fmt.Errorf("expected ',' after version") + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after ','") + } + high, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "]" { + return VersionInterval{}, fmt.Errorf("expected ']' after version") + } + toks = toks[1:] + + *args = toks + return VersionInterval{Low: low, High: high}, nil +} + +func parseString(s *string) (string, error) { + t := *s + if strings.HasPrefix(t, `"`) { + var err error + if t, err = strconv.Unquote(t); err != nil { + return "", err + } + } else if strings.ContainsAny(t, "\"'`") { + // Other quotes are reserved both for possible future expansion + // and to avoid confusion. For example if someone types 'x' + // we want that to be a syntax error and not a literal x in literal quotation marks. + return "", fmt.Errorf("unquoted string cannot contain quote") + } + *s = AutoQuote(t) + return t, nil +} + +var deprecatedRE = lazyregexp.New(`(?s)(?:^|\n\n)Deprecated: *(.*?)(?:$|\n\n)`) + +// parseDeprecation extracts the text of comments on a "module" directive and +// extracts a deprecation message from that. +// +// A deprecation message is contained in a paragraph within a block of comments +// that starts with "Deprecated:" (case sensitive). The message runs until the +// end of the paragraph and does not include the "Deprecated:" prefix. If the +// comment block has multiple paragraphs that start with "Deprecated:", +// parseDeprecation returns the message from the first. +func parseDeprecation(block *LineBlock, line *Line) string { + text := parseDirectiveComment(block, line) + m := deprecatedRE.FindStringSubmatch(text) + if m == nil { + return "" + } + return m[1] +} + +// parseDirectiveComment extracts the text of comments on a directive. +// If the directive's line does not have comments and is part of a block that +// does have comments, the block's comments are used. 
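+//
+// For example (an illustrative sketch; the paths are hypothetical), given
+//
+//	// Deprecated: use example.com/new instead.
+//	module example.com/old
+//
+// parseDirectiveComment returns "Deprecated: use example.com/new instead."
+// and parseDeprecation returns "use example.com/new instead."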
+func parseDirectiveComment(block *LineBlock, line *Line) string { + comments := line.Comment() + if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 { + comments = block.Comment() + } + groups := [][]Comment{comments.Before, comments.Suffix} + var lines []string + for _, g := range groups { + for _, c := range g { + if !strings.HasPrefix(c.Token, "//") { + continue // blank line + } + lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//"))) + } + } + return strings.Join(lines, "\n") +} + +type ErrorList []Error + +func (e ErrorList) Error() string { + errStrs := make([]string, len(e)) + for i, err := range e { + errStrs[i] = err.Error() + } + return strings.Join(errStrs, "\n") +} + +type Error struct { + Filename string + Pos Position + Verb string + ModPath string + Err error +} + +func (e *Error) Error() string { + var pos string + if e.Pos.LineRune > 1 { + // Don't print LineRune if it's 1 (beginning of line). + // It's always 1 except in scanner errors, which are rare. + pos = fmt.Sprintf("%s:%d:%d: ", e.Filename, e.Pos.Line, e.Pos.LineRune) + } else if e.Pos.Line > 0 { + pos = fmt.Sprintf("%s:%d: ", e.Filename, e.Pos.Line) + } else if e.Filename != "" { + pos = fmt.Sprintf("%s: ", e.Filename) + } + + var directive string + if e.ModPath != "" { + directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath) + } else if e.Verb != "" { + directive = fmt.Sprintf("%s: ", e.Verb) + } + + return pos + directive + e.Err.Error() +} + +func (e *Error) Unwrap() error { return e.Err } + +func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) { + t, err := parseString(s) + if err != nil { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: *s, + Err: err, + }, + } + } + if fix != nil { + fixed, err := fix(path, t) + if err != nil { + if err, ok := err.(*module.ModuleError); ok { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: err.Err, + } + } + return "", err + } + t = fixed + } else { + cv := module.CanonicalVersion(t) + if cv == "" { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: t, + Err: errors.New("must be of the form v1.2.3"), + }, + } + } + t = cv + } + *s = t + return *s, nil +} + +func modulePathMajor(path string) (string, error) { + _, major, ok := module.SplitPathVersion(path) + if !ok { + return "", fmt.Errorf("invalid module path") + } + return major, nil +} + +func (f *File) Format() ([]byte, error) { + return Format(f.Syntax), nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like [File.DropRequire] +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
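+//
+// A typical edit sequence (an illustrative sketch; the path is hypothetical):
+//
+//	f.DropRequire("example.com/old") // clears the entry in place
+//	f.Cleanup()                      // drops cleared entries and tidies the syntax tree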
+func (f *File) Cleanup() { + w := 0 + for _, r := range f.Require { + if r.Mod.Path != "" { + f.Require[w] = r + w++ + } + } + f.Require = f.Require[:w] + + w = 0 + for _, x := range f.Exclude { + if x.Mod.Path != "" { + f.Exclude[w] = x + w++ + } + } + f.Exclude = f.Exclude[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + w = 0 + for _, r := range f.Retract { + if r.Low != "" || r.High != "" { + f.Retract[w] = r + w++ + } + } + f.Retract = f.Retract[:w] + + f.Syntax.Cleanup() +} + +func (f *File) AddGoStmt(version string) error { + if !GoVersionRE.MatchString(version) { + return fmt.Errorf("invalid language version %q", version) + } + if f.Go == nil { + var hint Expr + if f.Module != nil && f.Module.Syntax != nil { + hint = f.Module.Syntax + } else if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Go = &Go{ + Version: version, + Syntax: f.Syntax.addLine(hint, "go", version), + } + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + +// DropGoStmt deletes the go statement from the file. +func (f *File) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. +func (f *File) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + +func (f *File) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + var hint Expr + if f.Go != nil && f.Go.Syntax != nil { + hint = f.Go.Syntax + } else if f.Module != nil && f.Module.Syntax != nil { + hint = f.Module.Syntax + } + f.Toolchain = &Toolchain{ + Name: name, + Syntax: f.Syntax.addLine(hint, "toolchain", name), + } + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + +// AddRequire sets the first require line for path to version vers, +// preserving any existing comments for that line and removing all +// other lines for path. +// +// If no line currently exists for path, AddRequire adds a new line +// at the end of the last require block. +func (f *File) AddRequire(path, vers string) error { + need := true + for _, r := range f.Require { + if r.Mod.Path == path { + if need { + r.Mod.Version = vers + f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) + need = false + } else { + r.Syntax.markRemoved() + *r = Require{} + } + } + } + + if need { + f.AddNewRequire(path, vers, false) + } + return nil +} + +// AddNewRequire adds a new require line for path at version vers at the end of +// the last require block, regardless of any existing require lines for path. +func (f *File) AddNewRequire(path, vers string, indirect bool) { + line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) + r := &Require{ + Mod: module.Version{Path: path, Version: vers}, + Syntax: line, + } + r.setIndirect(indirect) + f.Require = append(f.Require, r) +} + +// SetRequire updates the requirements of f to contain exactly req, preserving +// the existing block structure and line comment contents (except for 'indirect' +// markings) for the first requirement on each named module path. +// +// The Syntax field is ignored for the requirements in req. +// +// Any requirements not already present in the file are added to the block +// containing the last require line. 
+// +// The requirements in req must specify at most one distinct version for each +// module path. +// +// If any existing requirements may be removed, the caller should call +// [File.Cleanup] after all edits are complete. +func (f *File) SetRequire(req []*Require) { + type elem struct { + version string + indirect bool + } + need := make(map[string]elem) + for _, r := range req { + if prev, dup := need[r.Mod.Path]; dup && prev.version != r.Mod.Version { + panic(fmt.Errorf("SetRequire called with conflicting versions for path %s (%s and %s)", r.Mod.Path, prev.version, r.Mod.Version)) + } + need[r.Mod.Path] = elem{r.Mod.Version, r.Indirect} + } + + // Update or delete the existing Require entries to preserve + // only the first for each module path in req. + for _, r := range f.Require { + e, ok := need[r.Mod.Path] + if ok { + r.setVersion(e.version) + r.setIndirect(e.indirect) + } else { + r.markRemoved() + } + delete(need, r.Mod.Path) + } + + // Add new entries in the last block of the file for any paths that weren't + // already present. + // + // This step is nondeterministic, but the final result will be deterministic + // because we will sort the block. + for path, e := range need { + f.AddNewRequire(path, e.version, e.indirect) + } + + f.SortBlocks() +} + +// SetRequireSeparateIndirect updates the requirements of f to contain the given +// requirements. Comment contents (except for 'indirect' markings) are retained +// from the first existing requirement for each module path. Like SetRequire, +// SetRequireSeparateIndirect adds requirements for new paths in req, +// updates the version and "// indirect" comment on existing requirements, +// and deletes requirements on paths not in req. Existing duplicate requirements +// are deleted. +// +// As its name suggests, SetRequireSeparateIndirect puts direct and indirect +// requirements into two separate blocks, one containing only direct +// requirements, and the other containing only indirect requirements. +// SetRequireSeparateIndirect may move requirements between these two blocks +// when their indirect markings change. However, SetRequireSeparateIndirect +// won't move requirements from other blocks, especially blocks with comments. +// +// If the file initially has one uncommented block of requirements, +// SetRequireSeparateIndirect will split it into a direct-only and indirect-only +// block. This aids in the transition to separate blocks. +func (f *File) SetRequireSeparateIndirect(req []*Require) { + // hasComments returns whether a line or block has comments + // other than "indirect". + hasComments := func(c Comments) bool { + return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 || + (len(c.Suffix) == 1 && + strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect") + } + + // moveReq adds r to block. If r was in another block, moveReq deletes + // it from that block and transfers its comments. + moveReq := func(r *Require, block *LineBlock) { + var line *Line + if r.Syntax == nil { + line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}} + r.Syntax = line + if r.Indirect { + r.setIndirect(true) + } + } else { + line = new(Line) + *line = *r.Syntax + if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" { + line.Token = line.Token[1:] + } + r.Syntax.Token = nil // Cleanup will delete the old line. + r.Syntax = line + } + line.InBlock = true + block.Line = append(block.Line, line) + } + + // Examine existing require lines and blocks. 
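+	//
+	// For example (an illustrative sketch; the paths are hypothetical), in
+	//
+	//	require example.com/a v1.0.0
+	//
+	//	require example.com/b v1.1.0 // indirect
+	//
+	// the first block is direct-only and the second indirect-only.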
+ var ( + // We may insert new requirements into the last uncommented + // direct-only and indirect-only blocks. We may also move requirements + // to the opposite block if their indirect markings change. + lastDirectIndex = -1 + lastIndirectIndex = -1 + + // If there are no direct-only or indirect-only blocks, a new block may + // be inserted after the last require line or block. + lastRequireIndex = -1 + + // If there's only one require line or block, and it's uncommented, + // we'll move its requirements to the direct-only or indirect-only blocks. + requireLineOrBlockCount = 0 + + // Track the block each requirement belongs to (if any) so we can + // move them later. + lineToBlock = make(map[*Line]*LineBlock) + ) + for i, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if len(stmt.Token) == 0 || stmt.Token[0] != "require" { + continue + } + lastRequireIndex = i + requireLineOrBlockCount++ + if !hasComments(stmt.Comments) { + if isIndirect(stmt) { + lastIndirectIndex = i + } else { + lastDirectIndex = i + } + } + + case *LineBlock: + if len(stmt.Token) == 0 || stmt.Token[0] != "require" { + continue + } + lastRequireIndex = i + requireLineOrBlockCount++ + allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) + allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) + for _, line := range stmt.Line { + lineToBlock[line] = stmt + if hasComments(line.Comments) { + allDirect = false + allIndirect = false + } else if isIndirect(line) { + allDirect = false + } else { + allIndirect = false + } + } + if allDirect { + lastDirectIndex = i + } + if allIndirect { + lastIndirectIndex = i + } + } + } + + oneFlatUncommentedBlock := requireLineOrBlockCount == 1 && + !hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment()) + + // Create direct and indirect blocks if needed. Convert lines into blocks + // if needed. If we end up with an empty block or a one-line block, + // Cleanup will delete it or convert it to a line later. + insertBlock := func(i int) *LineBlock { + block := &LineBlock{Token: []string{"require"}} + f.Syntax.Stmt = append(f.Syntax.Stmt, nil) + copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:]) + f.Syntax.Stmt[i] = block + return block + } + + ensureBlock := func(i int) *LineBlock { + switch stmt := f.Syntax.Stmt[i].(type) { + case *LineBlock: + return stmt + case *Line: + block := &LineBlock{ + Token: []string{"require"}, + Line: []*Line{stmt}, + } + stmt.Token = stmt.Token[1:] // remove "require" + stmt.InBlock = true + f.Syntax.Stmt[i] = block + return block + default: + panic(fmt.Sprintf("unexpected statement: %v", stmt)) + } + } + + var lastDirectBlock *LineBlock + if lastDirectIndex < 0 { + if lastIndirectIndex >= 0 { + lastDirectIndex = lastIndirectIndex + lastIndirectIndex++ + } else if lastRequireIndex >= 0 { + lastDirectIndex = lastRequireIndex + 1 + } else { + lastDirectIndex = len(f.Syntax.Stmt) + } + lastDirectBlock = insertBlock(lastDirectIndex) + } else { + lastDirectBlock = ensureBlock(lastDirectIndex) + } + + var lastIndirectBlock *LineBlock + if lastIndirectIndex < 0 { + lastIndirectIndex = lastDirectIndex + 1 + lastIndirectBlock = insertBlock(lastIndirectIndex) + } else { + lastIndirectBlock = ensureBlock(lastIndirectIndex) + } + + // Delete requirements we don't want anymore. + // Update versions and indirect comments on requirements we want to keep. 
+	// If a requirement is in last{Direct,Indirect}Block with the wrong
+	// indirect marking after this, or if the requirement is in a single
+	// uncommented mixed block (oneFlatUncommentedBlock), move it to the
+	// correct block.
+	//
+	// Some blocks may be empty after this. Cleanup will remove them.
+	need := make(map[string]*Require)
+	for _, r := range req {
+		need[r.Mod.Path] = r
+	}
+	have := make(map[string]*Require)
+	for _, r := range f.Require {
+		path := r.Mod.Path
+		if need[path] == nil || have[path] != nil {
+			// Requirement not needed, or duplicate requirement. Delete.
+			r.markRemoved()
+			continue
+		}
+		have[r.Mod.Path] = r
+		r.setVersion(need[path].Mod.Version)
+		r.setIndirect(need[path].Indirect)
+		if need[path].Indirect &&
+			(oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) {
+			moveReq(r, lastIndirectBlock)
+		} else if !need[path].Indirect &&
+			(oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) {
+			moveReq(r, lastDirectBlock)
+		}
+	}
+
+	// Add new requirements.
+	for path, r := range need {
+		if have[path] == nil {
+			if r.Indirect {
+				moveReq(r, lastIndirectBlock)
+			} else {
+				moveReq(r, lastDirectBlock)
+			}
+			f.Require = append(f.Require, r)
+		}
+	}
+
+	f.SortBlocks()
+}
+
+func (f *File) DropRequire(path string) error {
+	for _, r := range f.Require {
+		if r.Mod.Path == path {
+			r.Syntax.markRemoved()
+			*r = Require{}
+		}
+	}
+	return nil
+}
+
+// AddExclude adds an exclude statement to the mod file. Errors if the provided
+// version is not a canonical version string.
+func (f *File) AddExclude(path, vers string) error {
+	if err := checkCanonicalVersion(path, vers); err != nil {
+		return err
+	}
+
+	var hint *Line
+	for _, x := range f.Exclude {
+		if x.Mod.Path == path && x.Mod.Version == vers {
+			return nil
+		}
+		if x.Mod.Path == path {
+			hint = x.Syntax
+		}
+	}
+
+	f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
+	return nil
+}
+
+func (f *File) DropExclude(path, vers string) error {
+	for _, x := range f.Exclude {
+		if x.Mod.Path == path && x.Mod.Version == vers {
+			x.Syntax.markRemoved()
+			*x = Exclude{}
+		}
+	}
+	return nil
+}
+
+func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+	return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
+}
+
+func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error {
+	need := true
+	old := module.Version{Path: oldPath, Version: oldVers}
+	new := module.Version{Path: newPath, Version: newVers}
+	tokens := []string{"replace", AutoQuote(oldPath)}
+	if oldVers != "" {
+		tokens = append(tokens, oldVers)
+	}
+	tokens = append(tokens, "=>", AutoQuote(newPath))
+	if newVers != "" {
+		tokens = append(tokens, newVers)
+	}
+
+	var hint *Line
+	for _, r := range *replace {
+		if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
+			if need {
+				// Found replacement for old; update to use new.
+				r.New = new
+				syntax.updateLine(r.Syntax, tokens...)
+				need = false
+				continue
+			}
+			// Already added; delete other replacements for same.
+ r.Syntax.markRemoved() + *r = Replace{} + } + if r.Old.Path == oldPath { + hint = r.Syntax + } + } + if need { + *replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)}) + } + return nil +} + +func (f *File) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + r.Syntax.markRemoved() + *r = Replace{} + } + } + return nil +} + +// AddRetract adds a retract statement to the mod file. Errors if the provided +// version interval does not consist of canonical version strings +func (f *File) AddRetract(vi VersionInterval, rationale string) error { + var path string + if f.Module != nil { + path = f.Module.Mod.Path + } + if err := checkCanonicalVersion(path, vi.High); err != nil { + return err + } + if err := checkCanonicalVersion(path, vi.Low); err != nil { + return err + } + + r := &Retract{ + VersionInterval: vi, + } + if vi.Low == vi.High { + r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low)) + } else { + r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") + } + if rationale != "" { + for _, line := range strings.Split(rationale, "\n") { + com := Comment{Token: "// " + line} + r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) + } + } + return nil +} + +func (f *File) DropRetract(vi VersionInterval) error { + for _, r := range f.Retract { + if r.VersionInterval == vi { + r.Syntax.markRemoved() + *r = Retract{} + } + } + return nil +} + +func (f *File) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + // semanticSortForExcludeVersionV is the Go version (plus leading "v") at which + // lines in exclude blocks start to use semantic sort instead of lexicographic sort. + // See go.dev/issue/60028. + const semanticSortForExcludeVersionV = "v1.21" + useSemanticSortForExclude := f.Go != nil && semver.Compare("v"+f.Go.Version, semanticSortForExcludeVersionV) >= 0 + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + less := lineLess + if block.Token[0] == "exclude" && useSemanticSortForExclude { + less = lineExcludeLess + } else if block.Token[0] == "retract" { + less = lineRetractLess + } + sort.SliceStable(block.Line, func(i, j int) bool { + return less(block.Line[i], block.Line[j]) + }) + } +} + +// removeDups removes duplicate exclude and replace directives. +// +// Earlier exclude directives take priority. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. +func (f *File) removeDups() { + removeDups(f.Syntax, &f.Exclude, &f.Replace) +} + +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { + kill := make(map[*Line]bool) + + // Remove duplicate excludes. + if exclude != nil { + haveExclude := make(map[module.Version]bool) + for _, x := range *exclude { + if haveExclude[x.Mod] { + kill[x.Syntax] = true + continue + } + haveExclude[x.Mod] = true + } + var excl []*Exclude + for _, x := range *exclude { + if !kill[x.Syntax] { + excl = append(excl, x) + } + } + *exclude = excl + } + + // Remove duplicate replacements. + // Later replacements take priority over earlier ones. 
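+	// For example (an illustrative sketch; the paths are hypothetical), given
+	//
+	//	replace example.com/m => example.com/fork v1.0.0
+	//	replace example.com/m => example.com/fork v1.1.0
+	//
+	// only the second line is kept.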
+ haveReplace := make(map[module.Version]bool) + for i := len(*replace) - 1; i >= 0; i-- { + x := (*replace)[i] + if haveReplace[x.Old] { + kill[x.Syntax] = true + continue + } + haveReplace[x.Old] = true + } + var repl []*Replace + for _, x := range *replace { + if !kill[x.Syntax] { + repl = append(repl, x) + } + } + *replace = repl + + // Duplicate require and retract directives are not removed. + + // Drop killed statements from the syntax tree. + var stmts []Expr + for _, stmt := range syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if kill[stmt] { + continue + } + case *LineBlock: + var lines []*Line + for _, line := range stmt.Line { + if !kill[line] { + lines = append(lines, line) + } + } + stmt.Line = lines + if len(lines) == 0 { + continue + } + } + stmts = append(stmts, stmt) + } + syntax.Stmt = stmts +} + +// lineLess returns whether li should be sorted before lj. It sorts +// lexicographically without assigning any special meaning to tokens. +func lineLess(li, lj *Line) bool { + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) +} + +// lineExcludeLess reports whether li should be sorted before lj for lines in +// an "exclude" block. +func lineExcludeLess(li, lj *Line) bool { + if len(li.Token) != 2 || len(lj.Token) != 2 { + // Not a known exclude specification. + // Fall back to sorting lexicographically. + return lineLess(li, lj) + } + // An exclude specification has two tokens: ModulePath and Version. + // Compare module path by string order and version by semver rules. + if pi, pj := li.Token[0], lj.Token[0]; pi != pj { + return pi < pj + } + return semver.Compare(li.Token[1], lj.Token[1]) < 0 +} + +// lineRetractLess returns whether li should be sorted before lj for lines in +// a "retract" block. It treats each line as a version interval. Single versions +// are compared as if they were intervals with the same low and high version. +// Intervals are sorted in descending order, first by low version, then by +// high version, using semver.Compare. +func lineRetractLess(li, lj *Line) bool { + interval := func(l *Line) VersionInterval { + if len(l.Token) == 1 { + return VersionInterval{Low: l.Token[0], High: l.Token[0]} + } else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" { + return VersionInterval{Low: l.Token[1], High: l.Token[3]} + } else { + // Line in unknown format. Treat as an invalid version. + return VersionInterval{} + } + } + vii := interval(li) + vij := interval(lj) + if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { + return cmp > 0 + } + return semver.Compare(vii.High, vij.High) > 0 +} + +// checkCanonicalVersion returns a non-nil error if vers is not a canonical +// version string or does not match the major version of path. +// +// If path is non-empty, the error text suggests a format with a major version +// corresponding to the path. 
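+//
+// Illustrative behavior (a sketch; the paths are hypothetical):
+//
+//	checkCanonicalVersion("example.com/m/v2", "v2.3.4") // nil
+//	checkCanonicalVersion("example.com/m/v2", "v2.3")   // error: must be of the form v2.2.3
+//	checkCanonicalVersion("example.com/m/v2", "v1.0.0") // error: should be v2, not v1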
+func checkCanonicalVersion(path, vers string) error { + _, pathMajor, pathMajorOk := module.SplitPathVersion(path) + + if vers == "" || vers != module.CanonicalVersion(vers) { + if pathMajor == "" { + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form v1.2.3"), + } + } + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)), + } + } + + if pathMajorOk { + if err := module.CheckPathMajor(vers, pathMajor); err != nil { + if pathMajor == "" { + // In this context, the user probably wrote "v2.3.4" when they meant + // "v2.3.4+incompatible". Suggest that instead of "v0 or v1". + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)), + } + } + return err + } + } + + return nil +} diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go new file mode 100644 index 00000000..d7b99376 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -0,0 +1,285 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "fmt" + "sort" + "strings" +) + +// A WorkFile is the parsed, interpreted form of a go.work file. +type WorkFile struct { + Go *Go + Toolchain *Toolchain + Use []*Use + Replace []*Replace + + Syntax *FileSyntax +} + +// A Use is a single directory statement. +type Use struct { + Path string // Use path of module. + ModulePath string // Module path in the comment. + Syntax *Line +} + +// ParseWork parses and returns a go.work file. +// +// file is the name of the file, used in positions and errors. +// +// data is the content of the file. +// +// fix is an optional function that canonicalizes module versions. +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] +// must return the same string). +func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &WorkFile{ + Syntax: fs, + } + var errs ErrorList + + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, x, x.Token[0], x.Token[1:], fix) + + case *LineBlock: + if len(x.Token) > 1 { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + continue + } + switch x.Token[0] { + default: + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + continue + case "use", "replace": + for _, l := range x.Line { + f.add(&errs, l, x.Token[0], l.Token, fix) + } + } + } + } + + if len(errs) > 0 { + return nil, errs + } + return f, nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like [WorkFile.DropRequire] +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
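+//
+// A typical edit sequence (an illustrative sketch; the path is hypothetical):
+//
+//	wf.DropUse("./old") // clears the entry in place
+//	wf.Cleanup()        // drops cleared entries and tidies the syntax tree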
+func (f *WorkFile) Cleanup() { + w := 0 + for _, r := range f.Use { + if r.Path != "" { + f.Use[w] = r + w++ + } + } + f.Use = f.Use[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + f.Syntax.Cleanup() +} + +func (f *WorkFile) AddGoStmt(version string) error { + if !GoVersionRE.MatchString(version) { + return fmt.Errorf("invalid language version %q", version) + } + if f.Go == nil { + stmt := &Line{Token: []string{"go", version}} + f.Go = &Go{ + Version: version, + Syntax: stmt, + } + // Find the first non-comment-only block and add + // the go statement before it. That will keep file comments at the top. + i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + +func (f *WorkFile) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + stmt := &Line{Token: []string{"toolchain", name}} + f.Toolchain = &Toolchain{ + Name: name, + Syntax: stmt, + } + // Find the go line and add the toolchain line after it. + // Or else find the first non-comment-only block and add + // the toolchain line before it. That will keep file comments at the top. + i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if line, ok := f.Syntax.Stmt[i].(*Line); ok && len(line.Token) > 0 && line.Token[0] == "go" { + i++ + goto Found + } + } + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + Found: + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + +// DropGoStmt deletes the go statement from the file. +func (f *WorkFile) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. +func (f *WorkFile) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + +func (f *WorkFile) AddUse(diskPath, modulePath string) error { + need := true + for _, d := range f.Use { + if d.Path == diskPath { + if need { + d.ModulePath = modulePath + f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath)) + need = false + } else { + d.Syntax.markRemoved() + *d = Use{} + } + } + } + + if need { + f.AddNewUse(diskPath, modulePath) + } + return nil +} + +func (f *WorkFile) AddNewUse(diskPath, modulePath string) { + line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath)) + f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line}) +} + +func (f *WorkFile) SetUse(dirs []*Use) { + need := make(map[string]string) + for _, d := range dirs { + need[d.Path] = d.ModulePath + } + + for _, d := range f.Use { + if modulePath, ok := need[d.Path]; ok { + d.ModulePath = modulePath + } else { + d.Syntax.markRemoved() + *d = Use{} + } + } + + // TODO(#45713): Add module path to comment. 
+ + for diskPath, modulePath := range need { + f.AddNewUse(diskPath, modulePath) + } + f.SortBlocks() +} + +func (f *WorkFile) DropUse(path string) error { + for _, d := range f.Use { + if d.Path == path { + d.Syntax.markRemoved() + *d = Use{} + } + } + return nil +} + +func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error { + return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) +} + +func (f *WorkFile) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + r.Syntax.markRemoved() + *r = Replace{} + } + } + return nil +} + +func (f *WorkFile) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + sort.SliceStable(block.Line, func(i, j int) bool { + return lineLess(block.Line[i], block.Line[j]) + }) + } +} + +// removeDups removes duplicate replace directives. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. +func (f *WorkFile) removeDups() { + removeDups(f.Syntax, nil, &f.Replace) +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go new file mode 100644 index 00000000..2a364b22 --- /dev/null +++ b/vendor/golang.org/x/mod/module/module.go @@ -0,0 +1,841 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type along with support code. +// +// The [module.Version] type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably [Check], verify that +// a particular path, version pair is valid. +// +// # Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. 
+// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// # Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/mod/semver" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". + Path string + + // Version is usually a semantic version in canonical form. + // There are three exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. 
+ Version string `json:",omitempty"` +} + +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a [ModuleError] derived from a [Version] and error, +// or err itself if it is already such an error. +func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. +func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + +// An InvalidPathError indicates a module, import, or file path doesn't +// satisfy all naming constraints. See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. +type InvalidPathError struct { + Kind string // "module", "import", or "file" + Path string + Err error +} + +func (e *InvalidPathError) Error() string { + return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) +} + +func (e *InvalidPathError) Unwrap() error { return e.Err } + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } + } + _, pathMajor, _ := SplitPathVersion(path) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' 
||
+		'0' <= r && r <= '9' ||
+		'a' <= r && r <= 'z'
+}
+
+// modPathOK reports whether r can appear in a module path element.
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
+//
+// This matches what "go get" has historically recognized in import paths,
+// and avoids confusing sequences like '%20' or '+' that would change meaning
+// if used in a URL.
+//
+// TODO(rsc): We would like to allow Unicode letters, but that requires additional
+// care in the safe encoding (see "escaped paths" above).
+func modPathOK(r rune) bool {
+	if r < utf8.RuneSelf {
+		return r == '-' || r == '.' || r == '_' || r == '~' ||
+			'0' <= r && r <= '9' ||
+			'A' <= r && r <= 'Z' ||
+			'a' <= r && r <= 'z'
+	}
+	return false
+}
+
+// importPathOK reports whether r can appear in a package import path element.
+//
+// Import paths are intermediate between module paths and file paths: we
+// disallow characters that would be confusing or ambiguous as arguments to
+// 'go get' (such as '@' and ' '), but allow certain characters that are
+// otherwise-unambiguous on the command line and historically used for some
+// binary names (such as '++' as a suffix for compiler binaries and wrappers).
+func importPathOK(r rune) bool {
+	return modPathOK(r) || r == '+'
+}
+
+// fileNameOK reports whether r can appear in a file name.
+// For now we allow all Unicode letters but otherwise limit to modPathOK plus a few more punctuation characters.
+// If we expand the set of allowed characters here, we have to
+// work harder at detecting potential case-folding and normalization collisions.
+// See note about "escaped paths" above.
+func fileNameOK(r rune) bool {
+	if r < utf8.RuneSelf {
+		// Entire set of ASCII punctuation, from which we remove characters:
+		//     ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
+		// We disallow some shell special characters: " ' * < > ? ` |
+		// (Note that some of those are disallowed by the Windows file system as well.)
+		// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
+		// We allow spaces (U+0020) in file names.
+		const allowed = "!#$%&()+,-.=@[]^_{}~ "
+		if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
+			return true
+		}
+		return strings.ContainsRune(allowed, r)
+	}
+	// It may be OK to add more ASCII punctuation here, but only carefully.
+	// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
+	return unicode.IsLetter(r)
+}
+
+// CheckPath checks that a module path is valid.
+// A valid module path is a valid import path, as checked by [CheckImportPath],
+// with three additional constraints.
+// First, the leading path element (up to the first slash, if any),
+// by convention a domain name, must contain only lower-case ASCII letters,
+// ASCII digits, dots (U+002E), and dashes (U+002D);
+// it must contain at least one dot and cannot start with a dash.
+// Second, a final path element of the form /vN, where N looks numeric
+// (ASCII digits and dots), must not begin with a leading zero, must not be /v1,
+// and must not contain any dots. For paths beginning with "gopkg.in/",
+// this second requirement is replaced by a requirement that the path
+// follow the gopkg.in server's conventions.
+// Third, no path element may begin with a dot.
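+//
+// Illustrative results (a sketch; the paths are hypothetical):
+//
+//	CheckPath("github.com/user/repo")    // nil
+//	CheckPath("github.com/user/repo/v2") // nil
+//	CheckPath("repo")                    // error: missing dot in first path element
+//	CheckPath("example.com/m/v1")        // error: invalid version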
+func CheckPath(path string) (err error) { + defer func() { + if err != nil { + err = &InvalidPathError{Kind: "module", Path: path, Err: err} + } + }() + + if err := checkPath(path, modulePath); err != nil { + return err + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("leading slash") + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("missing dot in first path element") + } + if path[0] == '-' { + return fmt.Errorf("leading dash in first path element") + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("invalid char %q in first path element", r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("invalid version") + } + return nil +} + +// CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// It must not end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckImportPath(path string) error { + if err := checkPath(path, importPath); err != nil { + return &InvalidPathError{Kind: "import", Path: path, Err: err} + } + return nil +} + +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. +type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + +// checkPath checks that a general path is valid. kind indicates what +// specific constraints should be applied. +// +// checkPath returns an error describing why the path is not valid. +// Because these checks apply to module, import, and file paths, +// and because other checks may be applied, the caller is expected to wrap +// this error with [InvalidPathError]. +func checkPath(path string, kind pathKind) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if path[0] == '-' && kind != filePath { + return fmt.Errorf("leading dash") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], kind); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], kind); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +func checkElem(elem string, kind pathKind) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && kind == modulePath { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' 
{ + return fmt.Errorf("trailing dot in path element") + } + for _, r := range elem { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("%q disallowed as path element component on Windows", short) + } + } + + if kind == filePath { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// CheckFilePath checks that a slash-separated file path is valid. +// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckFilePath(path string) error { + if err := checkPath(path, filePath); err != nil { + return &InvalidPathError{Kind: "file", Path: path, Err: err} + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' 
{
+			dot = true
+		}
+		i--
+	}
+	if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
+		return path, "", true
+	}
+	prefix, pathMajor = path[:i-2], path[i-2:]
+	if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
+		return path, "", false
+	}
+	return prefix, pathMajor, true
+}
+
+// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
+func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
+	if !strings.HasPrefix(path, "gopkg.in/") {
+		return path, "", false
+	}
+	i := len(path)
+	if strings.HasSuffix(path, "-unstable") {
+		i -= len("-unstable")
+	}
+	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
+		i--
+	}
+	if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
+		// All gopkg.in paths must end in vN for some N.
+		return path, "", false
+	}
+	prefix, pathMajor = path[:i-2], path[i-2:]
+	if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
+		return path, "", false
+	}
+	return prefix, pathMajor, true
+}
+
+// MatchPathMajor reports whether the semantic version v
+// matches the path major version pathMajor.
+//
+// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil.
+func MatchPathMajor(v, pathMajor string) bool {
+	return CheckPathMajor(v, pathMajor) == nil
+}
+
+// CheckPathMajor returns a non-nil error if the semantic version v
+// does not match the path major version pathMajor.
+func CheckPathMajor(v, pathMajor string) error {
+	// TODO(jayconrod): return errors or panic for invalid inputs. This function
+	// (and others) was covered by integration tests for cmd/go, and surrounding
+	// code protected against invalid inputs like non-canonical versions.
+	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+	}
+	if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
+		// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
+		// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
+		return nil
+	}
+	m := semver.Major(v)
+	if pathMajor == "" {
+		if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
+			return nil
+		}
+		pathMajor = "v0 or v1"
+	} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
+		if m == pathMajor[1:] {
+			return nil
+		}
+		pathMajor = pathMajor[1:]
+	}
+	return &InvalidVersionError{
+		Version: v,
+		Err:     fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
+	}
+}
+
+// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
+// An empty PathMajorPrefix allows either v0 or v1.
+//
+// Note that [MatchPathMajor] may accept some versions that do not actually begin
+// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
+// pathMajor, even though that pathMajor implies 'v1' tagging.
+func PathMajorPrefix(pathMajor string) string {
+	if pathMajor == "" {
+		return ""
+	}
+	if pathMajor[0] != '/' && pathMajor[0] != '.' {
+		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
+	}
+	if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+		pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+	}
+	m := pathMajor[1:]
+	if m != semver.Major(m) {
+		panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix is not a valid major version")
+	}
+	return m
+}
+
+// CanonicalVersion returns the canonical form of the version string v.
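// Illustrative sketch (editorial, not part of the vendored file): how the
// version-suffix helpers above slice a path; the paths are invented and the
// outputs follow the code as written.
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	fmt.Println(module.SplitPathVersion("example.com/m/v2")) // example.com/m /v2 true
	fmt.Println(module.SplitPathVersion("example.com/m"))    // example.com/m  true (empty suffix)
	fmt.Println(module.SplitPathVersion("gopkg.in/yaml.v2")) // gopkg.in/yaml .v2 true

	// MatchPathMajor ties a semantic version to the path suffix.
	fmt.Println(module.MatchPathMajor("v2.3.4", "/v2")) // true
	fmt.Println(module.MatchPathMajor("v3.0.0", "/v2")) // false
}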
+// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. +// The Version fields are interpreted as semantic versions (using [semver.Compare]) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// EscapePath returns the escaped form of the given module path. +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return escapeString(path) +} + +// EscapeVersion returns the escaped form of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EscapeVersion(v string) (escaped string, err error) { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } + } + return escapeString(v) +} + +func escapeString(s string) (escaped string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the escaping loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EscapePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. +func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped module path %q", escaped) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) + } + return path, nil +} + +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. 
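// Illustrative sketch (editorial, not part of the vendored file): the
// "!"-escaping implemented above keeps module paths unambiguous on
// case-insensitive file systems. The path is hypothetical.
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Each upper-case letter becomes '!' plus its lower-case form.
	e, _ := module.EscapePath("github.com/Azure/azure-sdk-for-go")
	fmt.Println(e) // github.com/!azure/azure-sdk-for-go

	// UnescapePath restores the original path exactly.
	p, _ := module.UnescapePath(e)
	fmt.Println(p) // github.com/Azure/azure-sdk-for-go

	// CanonicalVersion preserves the special "+incompatible" build suffix.
	fmt.Println(module.CanonicalVersion("v2.0.1+incompatible")) // v2.0.1+incompatible
}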
+func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped version %q", escaped) + } + if err := checkElem(v, filePath); err != nil { + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) + } + return v, nil +} + +func unescapeString(escaped string) (string, bool) { + var buf []byte + + bang := false + for _, r := range escaped { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} + +// MatchPrefixPatterns reports whether any path prefix of target matches one of +// the glob patterns (as defined by [path.Match]) in the comma-separated globs +// list. This implements the algorithm used when matching a module path to the +// GOPRIVATE environment variable, as described by 'go help module-private'. +// +// It ignores any empty or malformed patterns in the list. +// Trailing slashes on patterns are ignored. +func MatchPrefixPatterns(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. + var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + glob = strings.TrimSuffix(glob, "/") + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go new file mode 100644 index 00000000..9cf19d32 --- /dev/null +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pseudo-versions +// +// Code authors are expected to tag the revisions they want users to use, +// including prereleases. However, not all authors tag versions at all, +// and not all commits a user might want to try will have tags. +// A pseudo-version is a version with a special form that allows us to +// address an untagged commit and order that version with respect to +// other versions we might encounter. +// +// A pseudo-version takes one of the general forms: +// +// (1) vX.0.0-yyyymmddhhmmss-abcdef123456 +// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 +// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible +// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 +// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible +// +// If there is no recently tagged version with the right major version vX, +// then form (1) is used, creating a space of pseudo-versions at the bottom +// of the vX version range, less than any tagged version, including the unlikely v0.0.0. 
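// Illustrative sketch (editorial, not part of the vendored file):
// MatchPrefixPatterns is the GOPRIVATE/GONOPROXY matching algorithm; the
// globs and targets here are invented.
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	globs := "*.corp.example.com,rsc.io/private"

	// A glob matches a prefix of the target, whole path elements at a time.
	fmt.Println(module.MatchPrefixPatterns(globs, "git.corp.example.com/repo")) // true
	fmt.Println(module.MatchPrefixPatterns(globs, "rsc.io/private/sub"))        // true
	fmt.Println(module.MatchPrefixPatterns(globs, "rsc.io/public"))             // false
}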
+// +// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible, +// then the pseudo-version uses form (2) or (3), making it a prerelease for the next +// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string +// ensures that the pseudo-version compares less than possible future explicit prereleases +// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1. +// +// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible, +// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease. + +package module + +import ( + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/semver" +) + +var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`) + +const PseudoVersionTimestampFormat = "20060102150405" + +// PseudoVersion returns a pseudo-version for the given major version ("v1") +// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time, +// and revision identifier (usually a 12-byte commit hash prefix). +func PseudoVersion(major, older string, t time.Time, rev string) string { + if major == "" { + major = "v0" + } + segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev) + build := semver.Build(older) + older = semver.Canonical(older) + if older == "" { + return major + ".0.0-" + segment // form (1) + } + if semver.Prerelease(older) != "" { + return older + ".0." + segment + build // form (4), (5) + } + + // Form (2), (3). + // Extract patch from vMAJOR.MINOR.PATCH + i := strings.LastIndex(older, ".") + 1 + v, patch := older[:i], older[i:] + + // Reassemble. + return v + incDecimal(patch) + "-0." + segment + build +} + +// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and +// revision, which may be used as a placeholder. +func ZeroPseudoVersion(major string) string { + return PseudoVersion(major, "", time.Time{}, "000000000000") +} + +// incDecimal returns the decimal string incremented by 1. +func incDecimal(decimal string) string { + // Scan right to left turning 9s to 0s until you find a digit to increment. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '9'; i-- { + digits[i] = '0' + } + if i >= 0 { + digits[i]++ + } else { + // digits is all zeros + digits[0] = '1' + digits = append(digits, '0') + } + return string(digits) +} + +// decDecimal returns the decimal string decremented by 1, or the empty string +// if the decimal is all zeroes. +func decDecimal(decimal string) string { + // Scan right to left turning 0s to 9s until you find a digit to decrement. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '0'; i-- { + digits[i] = '9' + } + if i < 0 { + // decimal is all zeros + return "" + } + if i == 0 && digits[i] == '1' && len(digits) > 1 { + digits = digits[1:] + } else { + digits[i]-- + } + return string(digits) +} + +// IsPseudoVersion reports whether v is a pseudo-version. +func IsPseudoVersion(v string) bool { + return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) +} + +// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, +// timestamp, and revision, as returned by [ZeroPseudoVersion]. 
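// Illustrative sketch (editorial, not part of the vendored file): the common
// pseudo-version forms described above, produced by PseudoVersion. The
// timestamp and revision hash are made up.
package main

import (
	"fmt"
	"time"

	"golang.org/x/mod/module"
)

func main() {
	t := time.Date(2024, 12, 4, 13, 31, 43, 0, time.UTC)
	rev := "abcdef123456"

	fmt.Println(module.PseudoVersion("v1", "", t, rev))         // v1.0.0-20241204133143-abcdef123456 (form 1: no base tag)
	fmt.Println(module.PseudoVersion("", "v1.2.3", t, rev))     // v1.2.4-0.20241204133143-abcdef123456 (form 2)
	fmt.Println(module.PseudoVersion("", "v1.2.3-pre", t, rev)) // v1.2.3-pre.0.20241204133143-abcdef123456 (form 4)

	fmt.Println(module.IsPseudoVersion("v1.2.3")) // false: a plain tagged version
}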
+func IsZeroPseudoVersion(v string) bool { + return v == ZeroPseudoVersion(semver.Major(v)) +} + +// PseudoVersionTime returns the time stamp of the pseudo-version v. +// It returns an error if v is not a pseudo-version or if the time stamp +// embedded in the pseudo-version is not a valid time. +func PseudoVersionTime(v string) (time.Time, error) { + _, timestamp, _, _, err := parsePseudoVersion(v) + if err != nil { + return time.Time{}, err + } + t, err := time.Parse("20060102150405", timestamp) + if err != nil { + return time.Time{}, &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("malformed time %q", timestamp), + } + } + return t, nil +} + +// PseudoVersionRev returns the revision identifier of the pseudo-version v. +// It returns an error if v is not a pseudo-version. +func PseudoVersionRev(v string) (rev string, err error) { + _, _, rev, _, err = parsePseudoVersion(v) + return +} + +// PseudoVersionBase returns the canonical parent version, if any, upon which +// the pseudo-version v is based. +// +// If v has no parent version (that is, if it is "vX.0.0-[…]"), +// PseudoVersionBase returns the empty string and a nil error. +func PseudoVersionBase(v string) (string, error) { + base, _, _, build, err := parsePseudoVersion(v) + if err != nil { + return "", err + } + + switch pre := semver.Prerelease(base); pre { + case "": + // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" + if build != "" { + // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible + // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, + // but the "+incompatible" suffix implies that the major version of + // the parent tag is not compatible with the module's import path. + // + // There are a few such entries in the index generated by proxy.golang.org, + // but we believe those entries were generated by the proxy itself. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("lacks base version, but has build metadata %q", build), + } + } + return "", nil + + case "-0": + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible + base = strings.TrimSuffix(base, pre) + i := strings.LastIndexByte(base, '.') + if i < 0 { + panic("base from parsePseudoVersion missing patch number: " + base) + } + patch := decDecimal(base[i+1:]) + if patch == "" { + // vX.0.0-0 is invalid, but has been observed in the wild in the index + // generated by requests to proxy.golang.org. + // + // NOTE(bcmills): I cannot find a historical bug that accounts for + // pseudo-versions of this form, nor have I seen such versions in any + // actual go.mod files. If we find actual examples of this form and a + // reasonable theory of how they came into existence, it seems fine to + // treat them as equivalent to vX.0.0 (especially since the invalid + // pseudo-versions have lower precedence than the real ones). For now, we + // reject them. 
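// Illustrative sketch (editorial, not part of the vendored file):
// decomposing a pseudo-version with the accessors defined in this file; the
// input is the form-(2) value from the previous sketch.
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	v := "v1.2.4-0.20241204133143-abcdef123456"

	base, _ := module.PseudoVersionBase(v)
	fmt.Println(base) // v1.2.3 (the tag this pseudo-version follows)

	t, _ := module.PseudoVersionTime(v)
	fmt.Println(t) // 2024-12-04 13:31:43 +0000 UTC

	rev, _ := module.PseudoVersionRev(v)
	fmt.Println(rev) // abcdef123456
}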
+ return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("version before %s would have negative patch number", base), + } + } + return base[:i+1] + patch + build, nil + + default: + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible + if !strings.HasSuffix(base, ".0") { + panic(`base from parsePseudoVersion missing ".0" before date: ` + base) + } + return strings.TrimSuffix(base, ".0") + build, nil + } +} + +var errPseudoSyntax = errors.New("syntax error") + +func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { + if !IsPseudoVersion(v) { + return "", "", "", "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: errPseudoSyntax, + } + } + build = semver.Build(v) + v = strings.TrimSuffix(v, build) + j := strings.LastIndex(v, "-") + v, rev = v[:j], v[j+1:] + i := strings.LastIndex(v, "-") + if j := strings.LastIndex(v, "."); j > i { + base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" + timestamp = v[j+1:] + } else { + base = v[:i] // "vX.0.0" + timestamp = v[i+1:] + } + return base, timestamp, rev, build, nil +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 00000000..9a2dfd33 --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,401 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import "sort" + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". 
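// Illustrative sketch (editorial, not part of the vendored file): the "v"
// prefix, shorthand, and canonicalization rules from the package comment
// above.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("v1.2"))  // true: shorthand for v1.2.0
	fmt.Println(semver.IsValid("1.2.3")) // false: the leading "v" is required

	fmt.Println(semver.Canonical("v1.2"))        // v1.2.0
	fmt.Println(semver.Canonical("v1.2.3+meta")) // v1.2.3 (build metadata discarded)

	fmt.Println(semver.Major("v2.1.0"))      // v2
	fmt.Println(semver.MajorMinor("v2.1.0")) // v2.1
}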
+// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements [sort.Interface] for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using [ByVersion]. +func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
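// Illustrative sketch (editorial, not part of the vendored file): prerelease
// precedence as implemented by comparePrerelease, observed through Sort.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	vs := []string{
		"v1.0.0",
		"v1.0.0-rc.1",
		"v1.0.0-beta.11",
		"v1.0.0-beta.2",
		"v1.0.0-alpha.1",
		"v1.0.0-alpha",
	}
	semver.Sort(vs)

	// Numeric identifiers compare numerically, so beta.2 < beta.11,
	// and any prerelease sorts before the release itself.
	fmt.Println(vs) // [v1.0.0-alpha v1.0.0-alpha.1 v1.0.0-beta.2 v1.0.0-beta.11 v1.0.0-rc.1 v1.0.0]
}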
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 00000000..2c4c4e23 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,634 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. 
+ if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
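// Illustrative sketch (editorial, not part of the vendored file): locating
// the innermost node around a source offset with PathEnclosingInterval; the
// source text is invented.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strings"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := "package p\n\nfunc f() { z := x + y }\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// The position of the "+" token.
	pos := fset.File(f.Pos()).Pos(strings.Index(src, "+"))

	path, exact := astutil.PathEnclosingInterval(f, pos, pos+1)
	for _, n := range path {
		fmt.Printf("%T\n", n) // *ast.BinaryExpr, *ast.AssignStmt, *ast.BlockStmt, *ast.FuncDecl, *ast.File
	}
	fmt.Println(exact) // true: the interval covers exactly the "+" token
}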
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + 
return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 00000000..18d1adb0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,485 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. 
+ // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. 
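// Illustrative sketch (editorial, not part of the vendored file): typical
// use of the import-adding helpers above on a file with no existing imports.
package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\n\nvar _ = 0\n", parser.ParseComments)
	if err != nil {
		panic(err)
	}

	astutil.AddImport(fset, f, "fmt")                  // plain: import "fmt"
	astutil.AddNamedImport(fset, f, "pathpkg", "path") // renamed: import pathpkg "path"

	// Prints roughly: package p, a parenthesized import block containing
	// "fmt" and pathpkg "path", then the var declaration.
	printer.Fprint(os.Stdout, fset, f)
}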
+// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
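+//
+// A hedged usage sketch (printGroups is a hypothetical caller, not part of
+// this package), printing each blank-line-separated group:
+//
+//	func printGroups(fset *token.FileSet, f *ast.File) {
+//		for i, group := range astutil.Imports(fset, f) {
+//			fmt.Printf("group %d:\n", i)
+//			for _, spec := range group {
+//				fmt.Printf("\t%s\n", spec.Path.Value)
+//			}
+//		}
+//	}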
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 00000000..58934f76 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,486 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
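+//
+// For illustration, a pre-order ApplyFunc typically type-switches on the
+// value returned by Node; this hedged sketch renames every identifier "foo"
+// to "bar":
+//
+//	astutil.Apply(root, func(c *astutil.Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
+//			c.Replace(&ast.Ident{Name: "bar"})
+//		}
+//		return true
+//	}, nil)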
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + 
a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 00000000..919d5305 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 00000000..1fc1de0b --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +// New returns an Inspector for the specified syntax trees. 
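+//
+// A minimal usage sketch, assuming the caller has already parsed files:
+//
+//	in := inspector.New(files)
+//	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
+//		call := n.(*ast.CallExpr)
+//		_ = call // inspect each call expression here
+//	})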
+func New(files []*ast.File) *Inspector {
+	return &Inspector{traverse(files)}
+}
+
+// An event represents a push or a pop
+// of an ast.Node during a traversal.
+type event struct {
+	node  ast.Node
+	typ   uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
+	index int    // index of corresponding push or pop event
+}
+
+// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
+// Type can be recovered from the sole bit in typ.
+
+// Preorder visits all the nodes of the files supplied to New in
+// depth-first order. It calls f(n) for each node n before it visits
+// n's children.
+//
+// The complete traversal sequence is determined by ast.Inspect.
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
+	// Because it avoids postorder calls to f, and the pruning
+	// check, Preorder is almost twice as fast as Nodes. The two
+	// features seem to contribute similar slowdowns (~1.4x each).
+
+	mask := maskOf(types)
+	for i := 0; i < len(in.events); {
+		ev := in.events[i]
+		if ev.index > i {
+			// push
+			if ev.typ&mask != 0 {
+				f(ev.node)
+			}
+			pop := ev.index
+			if in.events[pop].typ&mask == 0 {
+				// Subtrees do not contain types: skip them and pop.
+				i = pop + 1
+				continue
+			}
+		}
+		i++
+	}
+}
+
+// Nodes visits the nodes of the files supplied to New in depth-first
+// order. It calls f(n, true) for each node n before it visits n's
+// children. If f returns true, Nodes invokes f recursively for each
+// of the non-nil children of the node, followed by a call of
+// f(n, false).
+//
+// The complete traversal sequence is determined by ast.Inspect.
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
+	mask := maskOf(types)
+	for i := 0; i < len(in.events); {
+		ev := in.events[i]
+		if ev.index > i {
+			// push
+			pop := ev.index
+			if ev.typ&mask != 0 {
+				if !f(ev.node, true) {
+					i = pop + 1 // jump to corresponding pop + 1
+					continue
+				}
+			}
+			if in.events[pop].typ&mask == 0 {
+				// Subtrees do not contain types: skip them.
+				i = pop
+				continue
+			}
+		} else {
+			// pop
+			push := ev.index
+			if in.events[push].typ&mask != 0 {
+				f(ev.node, false)
+			}
+		}
+		i++
+	}
+}
+
+// WithStack visits nodes in a similar manner to Nodes, but it
+// supplies each call to f an additional argument, the current
+// traversal stack. The stack's first element is the outermost node,
+// an *ast.File; its last is the innermost, n.
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
+	mask := maskOf(types)
+	var stack []ast.Node
+	for i := 0; i < len(in.events); {
+		ev := in.events[i]
+		if ev.index > i {
+			// push
+			pop := ev.index
+			stack = append(stack, ev.node)
+			if ev.typ&mask != 0 {
+				if !f(ev.node, true, stack) {
+					i = pop + 1
+					stack = stack[:len(stack)-1]
+					continue
+				}
+			}
+			if in.events[pop].typ&mask == 0 {
+				// Subtrees do not contain types: skip them.
+ i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent. + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. + capacity := extent * 33 / 100 + if capacity > 1e6 { + capacity = 1e6 // impose some reasonable maximum + } + events := make([]event, 0, capacity) + + var stack []event + stack = append(stack, event{}) // include an extra event so file nodes have a parent + for _, f := range files { + ast.Inspect(f, func(n ast.Node) bool { + if n != nil { + // push + ev := event{ + node: n, + typ: 0, // temporarily used to accumulate type bits of subtree + index: len(events), // push event temporarily holds own index + } + stack = append(stack, ev) + events = append(events, ev) + } else { + // pop + top := len(stack) - 1 + ev := stack[top] + typ := typeOf(ev.node) + push := ev.index + parent := top - 1 + + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. + events[push].index = len(events) // make push refer to pop + + stack = stack[:top] + events = append(events, ev) + } + return true + }) + } + + return events +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 00000000..2a872f89 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,227 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file defines func typeOf(ast.Node) uint64. +// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import ( + "go/ast" + "math" +) + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nIndexListExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// +// A perfect hash seemed like overkill. 
+// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. + switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.IndexListExpr: + return 1 << nIndexListExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if nodes == nil { + return math.MaxUint64 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go new file mode 100644 index 00000000..cb6db889 --- 
/dev/null +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports // import "golang.org/x/tools/imports" + +import ( + "log" + "os" + + "golang.org/x/tools/internal/gocommand" + intimp "golang.org/x/tools/internal/imports" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Debug controls verbose logging. +var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used, and if src is nil the source +// is read from the filesystem. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data “as if” it were in filename, pass the data as a non-nil src. +func Process(filename string, src []byte, opt *Options) ([]byte, error) { + var err error + if src == nil { + src, err = os.ReadFile(filename) + if err != nil { + return nil, err + } + } + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + intopt := &intimp.Options{ + Env: &intimp.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + LocalPrefix: LocalPrefix, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + if Debug { + intopt.Env.Logf = log.Printf + } + return intimp.Process(filename, src, intopt) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 00000000..a6cf0e64 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. +package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that occurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. 
+	// The length needs to be large enough to cope with the majority of events
+	// but not so large as to cause undue stack pressure.
+	// A log message with two values will use 3 labels (one for each value and
+	// one for the message itself).
+
+	static  [3]label.Label // inline storage for the first few labels
+	dynamic []label.Label  // dynamically sized storage for remaining labels
+}
+
+// eventLabelMap implements label.Map for the labels of an Event.
+type eventLabelMap struct {
+	event Event
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+	if !ev.at.IsZero() {
+		fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+	}
+	for index := 0; ev.Valid(index); index++ {
+		if l := ev.Label(index); l.Valid() {
+			fmt.Fprintf(f, "\n\t%v", l)
+		}
+	}
+}
+
+func (ev Event) Valid(index int) bool {
+	return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+	if index < len(ev.static) {
+		return ev.static[index]
+	}
+	return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+	for _, l := range ev.static {
+		if l.Key() == key {
+			return l
+		}
+	}
+	for _, l := range ev.dynamic {
+		if l.Key() == key {
+			return l
+		}
+	}
+	return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+	return Event{
+		static:  static,
+		dynamic: labels,
+	}
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+	ev.at = at
+	return ev
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 00000000..05f3a9a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+	exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+	p := unsafe.Pointer(&e)
+	if e == nil {
+		// &e is always valid, and so p is always valid, but for the early abort
+		// of ProcessEvent to be efficient it needs to make the nil check on the
+		// pointer without having to dereference it, so we make the nil function
+		// also a nil pointer
+		p = nil
+	}
+	atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// It will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+	// add the current time to the event
+	ev.at = time.Now()
+	// hand the event off to the current exporter
+	return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
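+//
+// A hedged sketch of wiring a trivial exporter before calling Export (the
+// print body is illustrative only):
+//
+//	core.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+//		fmt.Printf("%v\n", ev) // Event implements fmt.Formatter
+//		return ctx
+//	})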
+func Export(ctx context.Context, ev Event) context.Context {
+	// get the global exporter and abort early if there is not one
+	exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+	if exporterPtr == nil {
+		return ctx
+	}
+	return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+	// get the global exporter and abort early if there is not one
+	exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+	if exporterPtr == nil {
+		return ctx, func() {}
+	}
+	ctx = deliver(ctx, *exporterPtr, begin)
+	return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 00000000..06c1d461
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+	Export(ctx, MakeEvent([3]label.Label{
+		keys.Msg.Of(message),
+		t1,
+	}, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+	Export(ctx, MakeEvent([3]label.Label{
+		keys.Msg.Of(message),
+		t1,
+		t2,
+	}, nil))
+}
+
+// Metric1 sends a label event to the exporter with the supplied labels.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+	return Export(ctx, MakeEvent([3]label.Label{
+		keys.Metric.New(),
+		t1,
+	}, nil))
+}
+
+// Metric2 sends a label event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+	return Export(ctx, MakeEvent([3]label.Label{
+		keys.Metric.New(),
+		t1,
+		t2,
+	}, nil))
+}
+
+// Start1 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+	return ExportPair(ctx,
+		MakeEvent([3]label.Label{
+			keys.Start.Of(name),
+			t1,
+		}, nil),
+		MakeEvent([3]label.Label{
+			keys.End.New(),
+		}, nil))
+}
+
+// Start2 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
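+//
+// A typical call site, sketched (t1 and t2 are labels chosen by the caller):
+//
+//	ctx, done := core.Start2(ctx, "operation", t1, t2)
+//	defer done()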
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 00000000..5dc6e6ba --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 00000000..4d55e577 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. 
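+//
+// For example, an exporter might dispatch on the event kind; a hedged sketch:
+//
+//	func export(ctx context.Context, ev core.Event, lm label.Map) context.Context {
+//		switch {
+//		case IsLog(ev):
+//			// handle a log event
+//		case IsMetric(ev):
+//			// handle a metric event
+//		}
+//		return ctx
+//	}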
+func IsMetric(ev core.Event) bool {
+	return ev.Label(0).Key() == keys.Metric
+}
+
+// Label sends a label event to the exporter with the supplied labels.
+func Label(ctx context.Context, labels ...label.Label) context.Context {
+	return core.Export(ctx, core.MakeEvent([3]label.Label{
+		keys.Label.New(),
+	}, labels))
+}
+
+// IsLabel returns true if the event was built by the Label function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLabel(ev core.Event) bool {
+	return ev.Label(0).Key() == keys.Label
+}
+
+// Start sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
+	return core.ExportPair(ctx,
+		core.MakeEvent([3]label.Label{
+			keys.Start.Of(name),
+		}, labels),
+		core.MakeEvent([3]label.Label{
+			keys.End.New(),
+		}, nil))
+}
+
+// IsStart returns true if the event was built by the Start function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsStart(ev core.Event) bool {
+	return ev.Label(0).Key() == keys.Start
+}
+
+// IsEnd returns true if the event is a span end event, built by the function
+// that Start returns.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsEnd(ev core.Event) bool {
+	return ev.Label(0).Key() == keys.End
+}
+
+// Detach returns a context without an associated span.
+// This allows the creation of spans that are not children of the current span.
+func Detach(ctx context.Context) context.Context {
+	return core.Export(ctx, core.MakeEvent([3]label.Label{
+		keys.Detach.New(),
+	}, nil))
+}
+
+// IsDetach returns true if the event was built by the Detach function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsDetach(ev core.Event) bool {
+	return ev.Label(0).Key() == keys.Detach
+}
diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go
new file mode 100644
index 00000000..a02206e3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go
@@ -0,0 +1,564 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+
+	"golang.org/x/tools/internal/event/label"
+)
+
+// Value represents a key for untyped values.
+type Value struct {
+	name        string
+	description string
+}
+
+// New creates a new Key for untyped values.
+func New(name, description string) *Value {
+	return &Value{name: name, description: description}
+}
+
+func (k *Value) Name() string        { return k.name }
+func (k *Value) Description() string { return k.description }
+
+func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
+	fmt.Fprint(w, k.From(l))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Value) Get(lm label.Map) interface{} {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
+
+// Of creates a new Label with this key and the supplied value.
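+//
+// A hedged sketch of the round trip from a caller's perspective (the key
+// name "request" and the value req are illustrative):
+//
+//	var k = keys.New("request", "the current request value")
+//	l := k.Of(req)  // wrap an arbitrary value in a Label
+//	v := k.From(l)  // recover it as an interface{}
+//	_ = v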
+func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. 
+func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. 
+func NewFloat32(name, description string) *Float32 {
+	return &Float32{name: name, description: description}
+}
+
+func (k *Float32) Name() string        { return k.name }
+func (k *Float32) Description() string { return k.description }
+
+func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float32) Of(v float32) label.Label {
+	return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+	return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+	name        string
+	description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+	return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string        { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+	return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+	return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+	name        string
+	description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+	return &String{name: name, description: description}
+}
+
+func (k *String) Name() string        { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+	name        string
+	description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+	return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string        { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
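+// A sketch of the encoding (hypothetical key, not upstream documentation):
+// true is packed as 1 and false as 0, which From below reads back as
+// Unpack64() > 0:
+//
+//	debug := NewBoolean("debug", "a demo flag") // hypothetical key
+//	on := debug.From(debug.Of(true))            // on == true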
+func (k *Boolean) Of(v bool) label.Label {
+	if v {
+		return label.Of64(k, 1)
+	}
+	return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+	name        string
+	description string
+}
+
+// NewError creates a new Key for error values.
+func NewError(name, description string) *Error {
+	return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string        { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+	io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+	err, _ := t.UnpackValue().(error)
+	return err
+}
diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 00000000..7e958665
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+	// Msg is a key used to add message strings to label lists.
+	Msg = NewString("message", "a readable message")
+	// Label is a key used to indicate an event adds labels to the context.
+	Label = NewTag("label", "a label context marker")
+	// Start is used for things like traces that have a name.
+	Start = NewString("start", "span start")
+	// End is a key used to indicate an event marks the end of a span.
+	End = NewTag("end", "a span end marker")
+	// Detach is a key used to indicate an event marks a span detach.
+	Detach = NewTag("detach", "a span detach marker")
+	// Err is a key used to add error values to label lists.
+	Err = NewError("error", "an error that occurred")
+	// Metric is a key used to indicate an event records metrics.
+	Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go
new file mode 100644
index 00000000..c0e8e731
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/util.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+	"sort"
+	"strings"
+)
+
+// Join returns a canonical join of the keys in S:
+// a sorted comma-separated string list.
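+//
+// For example (an illustrative sketch, not upstream documentation):
+//
+//	Join([]string{"b", "a"}) // "a,b"
+//
+// The ~[]T and ~string constraints also admit named slice and string types.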
+func Join[S ~[]T, T ~string](s S) string {
+	strs := make([]string, 0, len(s))
+	for _, v := range s {
+		strs = append(strs, string(v))
+	}
+	sort.Strings(strs)
+	return strings.Join(strs, ",")
+}
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 00000000..0f526e1f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only; the name should be unique
+// for communicating with external systems, but it is not required or enforced.
+type Key interface {
+	// Name returns the key name.
+	Name() string
+	// Description returns a string that can be used to describe the value.
+	Description() string
+
+	// Format is used in formatting to append the value of the label to the
+	// supplied buffer.
+	// The formatter may use the supplied buf as a scratch area to avoid
+	// allocations.
+	Format(w io.Writer, buf []byte, l Label)
+}
+
+// Label holds a key and value pair.
+// It is normally used when passing around lists of labels.
+type Label struct {
+	key     Key
+	packed  uint64
+	untyped interface{}
+}
+
+// Map is the interface to a collection of Labels indexed by key.
+type Map interface {
+	// Find returns the label that matches the supplied key.
+	Find(key Key) Label
+}
+
+// List is the interface to something that provides an iterable
+// list of labels.
+// Iteration should start from 0 and continue until Valid returns false.
+type List interface {
+	// Valid returns true if the index is within range for the list.
+	// It does not imply the label at that index will itself be valid.
+	Valid(index int) bool
+	// Label returns the label at the given index.
+	Label(index int) Label
+}
+
+// list implements List for a list of Labels.
+type list struct {
+	labels []Label
+}
+
+// filter wraps a List filtering out specific labels.
+type filter struct {
+	keys       []Key
+	underlying List
+}
+
+// listMap implements Map for a simple list of labels.
+type listMap struct {
+	labels []Label
+}
+
+// mapChain implements Map for a list of underlying Maps.
+type mapChain struct {
+	maps []Map
+}
+
+// OfValue creates a new label from the key and value.
+// This method is for implementing new key types; label creation should
+// normally be done with the Of method of the key.
+func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
+
+// UnpackValue assumes the label was built using OfValue and returns the value
+// that was passed to that constructor.
+// This method is for implementing new key types; for type safety, normal
+// access should be done with the From method of the key.
+func (t Label) UnpackValue() interface{} { return t.untyped }
+
+// Of64 creates a new label from a key and a uint64. This is often
+// used for non uint64 values that can be packed into a uint64.
+// This method is for implementing new key types; label creation should
+// normally be done with the Of method of the key.
+func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
+
+// Unpack64 assumes the label was built using Of64 and returns the value that
+// was passed to that constructor.
+// This method is for implementing new key types; for type safety, normal
+// access should be done with the From method of the key.
+func (t Label) Unpack64() uint64 { return t.packed }
+
+type stringptr unsafe.Pointer
+
+// OfString creates a new label from a key and a string.
+// This method is for implementing new key types; label creation should
+// normally be done with the Of method of the key.
+func OfString(k Key, v string) Label {
+	hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+	return Label{
+		key:     k,
+		packed:  uint64(hdr.Len),
+		untyped: stringptr(hdr.Data),
+	}
+}
+
+// UnpackString assumes the label was built using OfString and returns the
+// value that was passed to that constructor.
+// This method is for implementing new key types; for type safety, normal
+// access should be done with the From method of the key.
+func (t Label) UnpackString() string {
+	var v string
+	hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+	hdr.Data = uintptr(t.untyped.(stringptr))
+	hdr.Len = int(t.packed)
+	return v
+}
+
+// Valid returns true if the Label is a valid one (it has a key).
+func (t Label) Valid() bool { return t.key != nil }
+
+// Key returns the key of this Label.
+func (t Label) Key() Key { return t.key }
+
+// Format is used for debug printing of labels.
+func (t Label) Format(f fmt.State, r rune) {
+	if !t.Valid() {
+		io.WriteString(f, `nil`)
+		return
+	}
+	io.WriteString(f, t.Key().Name())
+	io.WriteString(f, "=")
+	var buf [128]byte
+	t.Key().Format(f, buf[:0], t)
+}
+
+func (l *list) Valid(index int) bool {
+	return index >= 0 && index < len(l.labels)
+}
+
+func (l *list) Label(index int) Label {
+	return l.labels[index]
+}
+
+func (f *filter) Valid(index int) bool {
+	return f.underlying.Valid(index)
+}
+
+func (f *filter) Label(index int) Label {
+	l := f.underlying.Label(index)
+	for _, f := range f.keys {
+		if l.Key() == f {
+			return Label{}
+		}
+	}
+	return l
+}
+
+func (lm listMap) Find(key Key) Label {
+	for _, l := range lm.labels {
+		if l.Key() == key {
+			return l
+		}
+	}
+	return Label{}
+}
+
+func (c mapChain) Find(key Key) Label {
+	for _, src := range c.maps {
+		l := src.Find(key)
+		if l.Valid() {
+			return l
+		}
+	}
+	return Label{}
+}
+
+var emptyList = &list{}
+
+func NewList(labels ...Label) List {
+	if len(labels) == 0 {
+		return emptyList
+	}
+	return &list{labels: labels}
+}
+
+func Filter(l List, keys ...Key) List {
+	if len(keys) == 0 {
+		return l
+	}
+	return &filter{keys: keys, underlying: l}
+}
+
+func NewMap(labels ...Label) Map {
+	return listMap{labels: labels}
+}
+
+func MergeMaps(srcs ...Map) Map {
+	var nonNil []Map
+	for _, src := range srcs {
+		if src != nil {
+			nonNil = append(nonNil, src)
+		}
+	}
+	if len(nonNil) == 1 {
+		return nonNil[0]
+	}
+	return mapChain{maps: nonNil}
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 00000000..eb7a8282
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,470 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
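+//
+// A minimal usage sketch (the verb and argument are illustrative, not
+// upstream documentation); a zero-value Runner is ready to use because
+// initialization is guarded internally by sync.Once:
+//
+//	var runner Runner
+//	inv := Invocation{Verb: "env", Args: []string{"GOMOD"}}
+//	stdout, err := runner.Run(context.Background(), inv)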
+package gocommand
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+	// once guards the runner initialization.
+	once sync.Once
+
+	// inFlight tracks available workers.
+	inFlight chan struct{}
+
+	// serialized guards the ability to run a go command serially,
+	// to avoid deadlocks when claiming workers.
+	serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+	runner.once.Do(func() {
+		runner.inFlight = make(chan struct{}, maxInFlight)
+		runner.serialized = make(chan struct{}, 1)
+	})
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// event keys for go command invocations
+var (
+	verb      = keys.NewString("verb", "go command verb")
+	directory = keys.NewString("directory", "")
+)
+
+func invLabels(inv Invocation) []label.Label {
+	return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)}
+}
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+	ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+	defer done()
+
+	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+	return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+	ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+	defer done()
+
+	_, err := runner.runPiped(ctx, inv, stdout, stderr)
+	return err
+}
+
+// RunRaw runs the invocation, serializing requests only if they fight over
+// go.mod changes.
+// Postcondition: both error results have same nilness.
+func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+	ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+	defer done()
+	// Make sure the runner is always initialized.
+	runner.initialize()
+
+	// First, try to run the go command concurrently.
+	stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
+
+	// If we encounter a load concurrency error, we need to retry serially.
+	if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) {
+		event.Error(ctx, "Load concurrency error, will retry serially", err)
+
+		// Run serially by calling runPiped.
+		stdout.Reset()
+		stderr.Reset()
+		friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
+	}
+
+	return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+	// Wait for 1 worker to become available.
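+	// (Editorial note: inFlight acts as a counting semaphore. Sending a token
+	// claims one of maxInFlight worker slots, and the deferred receive below
+	// releases it.)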
+ select { + case <-ctx.Done(): + return nil, nil, ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +// Postcondition: both error results have same nilness. +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return ctx.Err(), ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. + ModFile string + + // If Overlay is set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +// Postcondition: both error results have same nilness. +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) + case "mod": + // mod needs the sub-verb before flags. + goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. 
+		goArgs = append(goArgs, i.BuildFlags...)
+		appendModFile()
+		appendModFlag()
+		appendOverlayFlag()
+		goArgs = append(goArgs, i.Args...)
+	}
+	cmd := exec.Command("go", goArgs...)
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+
+	// cmd.WaitDelay was added only in go1.20 (see #50436).
+	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+		// https://go.dev/issue/59541: don't wait forever copying stderr
+		// after the command has exited.
+		// After CL 484741 we copy stdout manually, so we'll stop reading that as
+		// soon as ctx is done. However, we also don't want to wait around forever
+		// for stderr. Give a much-longer-than-reasonable delay and then assume that
+		// something has wedged in the kernel or runtime.
+		waitDelay.Set(reflect.ValueOf(30 * time.Second))
+	}
+
+	// On darwin the cwd gets resolved to the real path, which breaks anything that
+	// expects the working directory to keep the original path, including the
+	// go command when dealing with modules.
+	// The Go stdlib has a special feature where if the cwd and the PWD are the
+	// same node then it trusts the PWD, so by setting it in the env for the child
+	// process we fix up all the paths returned by the go command.
+	if !i.CleanEnv {
+		cmd.Env = os.Environ()
+	}
+	cmd.Env = append(cmd.Env, i.Env...)
+	if i.WorkingDir != "" {
+		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
+		cmd.Dir = i.WorkingDir
+	}
+
+	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+
+	return runCmdContext(ctx, cmd)
+}
+
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
+// runCmdContext is like exec.CommandContext except it sends os.Interrupt
+// before os.Kill.
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+	// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+	// copy it to the Writer in a goroutine until the process has finished and
+	// either the pipe reaches EOF or command's WaitDelay expires.
+	//
+	// However, the output from 'go list' can be quite large, and we don't want to
+	// keep reading (and allocating buffers) if we've already decided we don't
+	// care about the output. We don't want to wait for the process to finish, and
+	// we don't want to wait for the WaitDelay to expire either.
+	//
+	// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+	// it with a pipe (which is an *os.File), which we can close in order to stop
+	// copying output as soon as we realize we don't care about it.
+	var stdoutW *os.File
+	if cmd.Stdout != nil {
+		if _, ok := cmd.Stdout.(*os.File); !ok {
+			var stdoutR *os.File
+			stdoutR, stdoutW, err = os.Pipe()
+			if err != nil {
+				return err
+			}
+			prevStdout := cmd.Stdout
+			cmd.Stdout = stdoutW
+
+			stdoutErr := make(chan error, 1)
+			go func() {
+				_, err := io.Copy(prevStdout, stdoutR)
+				if err != nil {
+					err = fmt.Errorf("copying stdout: %w", err)
+				}
+				stdoutErr <- err
+			}()
+			defer func() {
+				// We started a goroutine to copy a stdout pipe.
+				// Wait for it to finish, or terminate it if need be.
+ var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that it has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that it still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { + return err + } + + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + HandleHangingGoCommand(cmd.Process) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + } + + // Cancelled. Interrupt and see if it ends voluntarily. + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } + } + + // Didn't shut down in response to interrupt. Kill it hard. + // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT + // on certain platforms, such as unix. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { + log.Printf("error killing the Go command: %v", err) + } + + return <-resChan +} + +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. 
+ +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 00000000..e38d1fb4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,163 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. 
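+	// (Editorial example: GOFLAGS="-mod=vendor" would make modFlagRegexp below
+	// capture "vendor", short-circuiting the vendor-directory probing that
+	// follows.)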
+ inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. 
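+// Each main module contributes four output lines (Path, Dir, GoMod,
+// GoVersion), which the parsing below relies on.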
+func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 00000000..446c5846 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.BuildFlags = nil // This is not a build command. + inv.ModFlag = "" + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. 
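+//
+// For example (illustrative inputs, not upstream documentation):
+//
+//	ParseGoVersionOutput("go version go1.21.4 linux/amd64") // "go1.21.4"
+//	ParseGoVersionOutput("unexpected output")               // ""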
+func ParseGoVersionOutput(data string) string {
+	re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
+	m := re.FindStringSubmatch(data)
+	if len(m) != 2 {
+		return "" // unrecognized version
+	}
+	return m[1]
+}
diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
new file mode 100644
index 00000000..83615155
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -0,0 +1,337 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gopathwalk is like filepath.Walk but specialized for finding Go
+// packages, particularly in $GOPATH and $GOROOT.
+package gopathwalk
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Options controls the behavior of a Walk call.
+type Options struct {
+	// If Logf is non-nil, debug logging is enabled through this function.
+	Logf func(format string, args ...interface{})
+
+	// Search module caches. Also disables legacy goimports ignore rules.
+	ModulesEnabled bool
+
+	// Maximum number of concurrent calls to user-provided callbacks,
+	// or 0 for GOMAXPROCS.
+	Concurrency int
+}
+
+// RootType indicates the type of a Root.
+type RootType int
+
+const (
+	RootUnknown RootType = iota
+	RootGOROOT
+	RootGOPATH
+	RootCurrentModule
+	RootModuleCache
+	RootOther
+)
+
+// A Root is a starting point for a Walk.
+type Root struct {
+	Path string
+	Type RootType
+}
+
+// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
+//
+// For each package found, add will be called with the absolute
+// paths of the containing source directory and the package directory.
+//
+// Unlike filepath.WalkDir, Walk follows symbolic links
+// (while guarding against cycles).
+func Walk(roots []Root, add func(root Root, dir string), opts Options) {
+	WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
+}
+
+// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to
+// find packages.
+//
+// For each package found, add will be called with the absolute
+// paths of the containing source directory and the package directory.
+// For each directory that will be scanned, skip will be called
+// with the absolute paths of the containing source directory and the directory.
+// If skip returns false on a directory it will be processed.
+//
+// Unlike filepath.WalkDir, WalkSkip follows symbolic links
+// (while guarding against cycles).
+func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
+	for _, root := range roots {
+		walkDir(root, add, skip, opts)
+	}
+}
+
+// walkDir creates a walker and starts fastwalk with this walker.
+func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
+	if opts.Logf == nil {
+		opts.Logf = func(format string, args ...interface{}) {}
+	}
+	if _, err := os.Stat(root.Path); os.IsNotExist(err) {
+		opts.Logf("skipping nonexistent directory: %v", root.Path)
+		return
+	}
+	start := time.Now()
+	opts.Logf("scanning %s", root.Path)
+
+	concurrency := opts.Concurrency
+	if concurrency == 0 {
+		// The walk will be either CPU-bound or I/O-bound, depending on what the
+		// caller-supplied add function does and the details of the user's platform
+		// and machine.
+		// Rather than trying to fine-tune the concurrency level for a
+		// specific environment, we default to GOMAXPROCS: it is likely to be a good
+		// choice for a CPU-bound add function, and if it is instead I/O-bound, then
+		// dealing with I/O saturation is arguably the job of the kernel and/or
+		// runtime. (Oversaturating I/O seems unlikely to harm performance as badly
+		// as failing to saturate would.)
+		concurrency = runtime.GOMAXPROCS(0)
+	}
+	w := &walker{
+		root: root,
+		add:  add,
+		skip: skip,
+		opts: opts,
+		sem:  make(chan struct{}, concurrency),
+	}
+	w.init()
+
+	w.sem <- struct{}{}
+	path := root.Path
+	if path == "" {
+		path = "."
+	}
+	if fi, err := os.Lstat(path); err == nil {
+		w.walk(path, nil, fs.FileInfoToDirEntry(fi))
+	} else {
+		w.opts.Logf("scanning directory %v: %v", root.Path, err)
+	}
+	<-w.sem
+	w.walking.Wait()
+
+	opts.Logf("scanned %s in %v", root.Path, time.Since(start))
+}
+
+// walker is the callback for fastwalk.Walk.
+type walker struct {
+	root Root                    // The source directory to scan.
+	add  func(Root, string)      // The callback that will be invoked for every possible Go package dir.
+	skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
+	opts Options                 // Options passed to Walk by the user.
+
+	walking     sync.WaitGroup
+	sem         chan struct{} // Channel of semaphore tokens; send to acquire, receive to release.
+	ignoredDirs []string
+
+	added sync.Map // map[string]bool
+}
+
+// A symlinkList is a linked list of os.FileInfos for parent directories
+// reached via symlinks.
+type symlinkList struct {
+	info os.FileInfo
+	prev *symlinkList
+}
+
+// init initializes the walker based on its Options.
+func (w *walker) init() {
+	var ignoredPaths []string
+	if w.root.Type == RootModuleCache {
+		ignoredPaths = []string{"cache"}
+	}
+	if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
+		ignoredPaths = w.getIgnoredDirs(w.root.Path)
+		ignoredPaths = append(ignoredPaths, "v", "mod")
+	}
+
+	for _, p := range ignoredPaths {
+		full := filepath.Join(w.root.Path, p)
+		w.ignoredDirs = append(w.ignoredDirs, full)
+		w.opts.Logf("Directory added to ignore list: %s", full)
+	}
+}
+
+// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
+// of relative directories to ignore when scanning for go files.
+// The provided path is one of the $GOPATH entries with "src" appended.
+func (w *walker) getIgnoredDirs(path string) []string {
+	file := filepath.Join(path, ".goimportsignore")
+	slurp, err := os.ReadFile(file)
+	if err != nil {
+		w.opts.Logf("%v", err)
+	} else {
+		w.opts.Logf("Read %s", file)
+	}
+	if err != nil {
+		return nil
+	}
+
+	var ignoredDirs []string
+	bs := bufio.NewScanner(bytes.NewReader(slurp))
+	for bs.Scan() {
+		line := strings.TrimSpace(bs.Text())
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+		ignoredDirs = append(ignoredDirs, line)
+	}
+	return ignoredDirs
+}
+
+// shouldSkipDir reports whether the directory should be skipped.
+func (w *walker) shouldSkipDir(dir string) bool {
+	for _, ignoredDir := range w.ignoredDirs {
+		if dir == ignoredDir {
+			return true
+		}
+	}
+	if w.skip != nil {
+		// Check with the user specified callback.
+		return w.skip(w.root, dir)
+	}
+	return false
+}
+
+// walk walks through the given path.
+//
+// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored.
+func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) {
+	if d.Type()&os.ModeSymlink != 0 {
+		// Walk the symlink's target rather than the symlink itself.
+		//
+		// (Note that os.Stat, unlike the lower-level os.Readlink,
+		// follows arbitrarily many layers of symlinks, so it will eventually
+		// reach either a non-symlink or a nonexistent target.)
+		//
+		// TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src
+		// and GOPATH/src. Do we really need to traverse them here? If so, why?
+
+		fi, err := os.Stat(path)
+		if err != nil {
+			w.opts.Logf("%v", err)
+			return
+		}
+
+		// Avoid walking symlink cycles: if we have already followed a symlink to
+		// this directory as a parent of itself, don't follow it again.
+		//
+		// This doesn't catch the first time through a cycle, but it also minimizes
+		// the number of extra stat calls we make if we *don't* encounter a cycle.
+		// Since we don't actually expect to encounter symlink cycles in practice,
+		// this seems like the right tradeoff.
+		for parent := pathSymlinks; parent != nil; parent = parent.prev {
+			if os.SameFile(fi, parent.info) {
+				return
+			}
+		}
+
+		pathSymlinks = &symlinkList{
+			info: fi,
+			prev: pathSymlinks,
+		}
+		d = fs.FileInfoToDirEntry(fi)
+	}
+
+	if d.Type().IsRegular() {
+		if !strings.HasSuffix(path, ".go") {
+			return
+		}
+
+		dir := filepath.Dir(path)
+		if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
+			// Doesn't make sense to have regular files
+			// directly in your $GOPATH/src or $GOROOT/src.
+			//
+			// TODO(bcmills): there are many levels of directory within
+			// RootModuleCache where this also wouldn't make sense.
+			// Can we generalize this to any directory without a corresponding
+			// import path?
+			return
+		}
+
+		if _, dup := w.added.LoadOrStore(dir, true); !dup {
+			w.add(w.root, dir)
+		}
+	}
+
+	if !d.IsDir() {
+		return
+	}
+
+	base := filepath.Base(path)
+	if base == "" || base[0] == '.' || base[0] == '_' ||
+		base == "testdata" ||
+		(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
+		(!w.opts.ModulesEnabled && base == "node_modules") ||
+		w.shouldSkipDir(path) {
+		return
+	}
+
+	// Read the directory and walk its entries.
+
+	f, err := os.Open(path)
+	if err != nil {
+		w.opts.Logf("%v", err)
+		return
+	}
+	defer f.Close()
+
+	for {
+		// We impose an arbitrary limit on the number of ReadDir results per
+		// directory to limit the amount of memory consumed for stale or upcoming
+		// directory entries. The limit trades off CPU (number of syscalls to read
+		// the whole directory) against RAM (reachable directory entries other than
+		// the one currently being processed).
+		//
+		// Since we process the directories recursively, we will end up maintaining
+		// a slice of entries for each level of the directory tree.
+		// (Compare https://go.dev/issue/36197.)
+		ents, err := f.ReadDir(1024)
+		if err != nil {
+			if err != io.EOF {
+				w.opts.Logf("%v", err)
+			}
+			break
+		}
+
+		for _, d := range ents {
+			nextPath := filepath.Join(path, d.Name())
+			if d.IsDir() {
+				select {
+				case w.sem <- struct{}{}:
+					// Got a new semaphore token, so we can traverse the directory concurrently.
+					d := d
+					w.walking.Add(1)
+					go func() {
+						defer func() {
+							<-w.sem
+							w.walking.Done()
+						}()
+						w.walk(nextPath, pathSymlinks, d)
+					}()
+					continue
+
+				default:
+					// No tokens available, so traverse serially.
+ } + } + + w.walk(nextPath, pathSymlinks, d) + } + } +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go new file mode 100644 index 00000000..93d49a6e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -0,0 +1,1892 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "io/fs" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" +) + +// importToGroup is a list of functions which map from an import path to +// a group number. +var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { + return + } + for _, p := range strings.Split(localPrefix, ",") { + if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { + return 3, true + } + } + return + }, + func(_, importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(_, importPath string) (num int, ok bool) { + firstComponent := strings.Split(importPath, "/")[0] + if strings.Contains(firstComponent, ".") { + return 1, true + } + return + }, +} + +func importGroup(localPrefix, importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(localPrefix, importPath); ok { + return n + } + } + return 0 +} + +type ImportFixType int + +const ( + AddImport ImportFixType = iota + DeleteImport + SetImportName +) + +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType + Relevance float64 // see pkg +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A packageInfo represents what's known about a package. +type packageInfo struct { + name string // real package name, if known. + exports map[string]bool // known exports. +} + +// parseOtherFiles parses all the Go files in srcDir except filename, including +// test files if filename looks like a test. +func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { + // This could use go/packages but it doesn't buy much, and it fails + // with https://golang.org/issue/26296 in LoadFiles mode in some cases. 
+	considerTests := strings.HasSuffix(filename, "_test.go")
+
+	fileBase := filepath.Base(filename)
+	packageFileInfos, err := os.ReadDir(srcDir)
+	if err != nil {
+		return nil
+	}
+
+	var files []*ast.File
+	for _, fi := range packageFileInfos {
+		if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") {
+			continue
+		}
+		if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") {
+			continue
+		}
+
+		f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
+		if err != nil {
+			continue
+		}
+
+		files = append(files, f)
+	}
+
+	return files
+}
+
+// addGlobals puts the names of package vars into the provided map.
+func addGlobals(f *ast.File, globals map[string]bool) {
+	for _, decl := range f.Decls {
+		genDecl, ok := decl.(*ast.GenDecl)
+		if !ok {
+			continue
+		}
+
+		for _, spec := range genDecl.Specs {
+			valueSpec, ok := spec.(*ast.ValueSpec)
+			if !ok {
+				continue
+			}
+			globals[valueSpec.Names[0].Name] = true
+		}
+	}
+}
+
+// collectReferences builds a map of selector expressions, from
+// left hand side (X) to a set of right hand sides (Sel).
+func collectReferences(f *ast.File) references {
+	refs := references{}
+
+	var visitor visitFn
+	visitor = func(node ast.Node) ast.Visitor {
+		if node == nil {
+			return visitor
+		}
+		switch v := node.(type) {
+		case *ast.SelectorExpr:
+			xident, ok := v.X.(*ast.Ident)
+			if !ok {
+				break
+			}
+			if xident.Obj != nil {
+				// If the parser can resolve it, it's not a package ref.
+				break
+			}
+			if !ast.IsExported(v.Sel.Name) {
+				// Whatever this is, it's not exported from a package.
+				break
+			}
+			pkgName := xident.Name
+			r := refs[pkgName]
+			if r == nil {
+				r = make(map[string]bool)
+				refs[pkgName] = r
+			}
+			r[v.Sel.Name] = true
+		}
+		return visitor
+	}
+	ast.Walk(visitor, f)
+	return refs
+}
+
+// collectImports returns all the imports in f.
+// Unnamed imports (., _) and "C" are ignored.
+func collectImports(f *ast.File) []*ImportInfo {
+	var imports []*ImportInfo
+	for _, imp := range f.Imports {
+		var name string
+		if imp.Name != nil {
+			name = imp.Name.Name
+		}
+		if imp.Path.Value == `"C"` || name == "_" || name == "." {
+			continue
+		}
+		path := strings.Trim(imp.Path.Value, `"`)
+		imports = append(imports, &ImportInfo{
+			Name:       name,
+			ImportPath: path,
+		})
+	}
+	return imports
+}
+
+// findMissingImport searches pass's candidates for an import that provides
+// pkg, containing all of syms.
+func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
+	for _, candidate := range p.candidates {
+		pkgInfo, ok := p.knownPackages[candidate.ImportPath]
+		if !ok {
+			continue
+		}
+		if p.importIdentifier(candidate) != pkg {
+			continue
+		}
+
+		allFound := true
+		for right := range syms {
+			if !pkgInfo.exports[right] {
+				allFound = false
+				break
+			}
+		}
+
+		if allFound {
+			return candidate
+		}
+	}
+	return nil
+}
+
+// references is a set of references found in a Go file. The first map key is the
+// left hand side of a selector expression, the second key is the right hand
+// side, and the value should always be true.
+type references map[string]map[string]bool
+
+// A pass contains all the inputs and state necessary to fix a file's imports.
+// It can be modified in some ways during use; see comments below.
+type pass struct {
+	// Inputs. These must be set before a call to load, and not modified after.
+	fset   *token.FileSet // fset used to parse f and its siblings.
+	f      *ast.File      // the file being fixed.
+	srcDir string         // the directory containing f.
+	env    *ProcessEnv    // the environment to use for go commands, etc.
+ loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + + // Intermediate state, generated by load. + existingImports map[string][]*ImportInfo + allRefs references + missingRefs references + + // Inputs to fix. These can be augmented between successive fix calls. + lastTry bool // indicates that this is the last call and fix should clean up as best it can. + candidates []*ImportInfo // candidate imports in priority order. + knownPackages map[string]*packageInfo // information about all known packages. +} + +// loadPackageNames saves the package names for everything referenced by imports. +func (p *pass) loadPackageNames(imports []*ImportInfo) error { + if p.env.Logf != nil { + p.env.Logf("loading package names for %v packages", len(imports)) + defer func() { + p.env.Logf("done loading package names for %v packages", len(imports)) + }() + } + var unknown []string + for _, imp := range imports { + if _, ok := p.knownPackages[imp.ImportPath]; ok { + continue + } + unknown = append(unknown, imp.ImportPath) + } + + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) + if err != nil { + return err + } + + for path, name := range names { + p.knownPackages[path] = &packageInfo{ + name: name, + exports: map[string]bool{}, + } + } + return nil +} + +// if there is a trailing major version, remove it +func withoutVersion(nm string) string { + if v := path.Base(nm); len(v) > 0 && v[0] == 'v' { + if _, err := strconv.Atoi(v[1:]); err == nil { + // this is, for instance, called with rand/v2 and returns rand + if len(v) < len(nm) { + xnm := nm[:len(nm)-len(v)-1] + return path.Base(xnm) + } + } + } + return nm +} + +// importIdentifier returns the identifier that imp will introduce. It will +// guess if the package name has not been loaded, e.g. because the source +// is not available. +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name + } + known := p.knownPackages[imp.ImportPath] + if known != nil && known.name != "" { + return withoutVersion(known.name) + } + return ImportPathToAssumedName(imp.ImportPath) +} + +// load reads in everything necessary to run a pass, and reports whether the +// file already has all the imports it needs. It fills in p.missingRefs with the +// file's missing symbols, if any, or removes unused imports if not. +func (p *pass) load() ([]*ImportFix, bool) { + p.knownPackages = map[string]*packageInfo{} + p.missingRefs = references{} + p.existingImports = map[string][]*ImportInfo{} + + // Load basic information about the file in question. + p.allRefs = collectReferences(p.f) + + // Load stuff from other files in the same package: + // global variables so we know they don't need resolving, and imports + // that we might want to mimic. + globals := map[string]bool{} + for _, otherFile := range p.otherFiles { + // Don't load globals from files that are in the same directory + // but a different package. Using them to suggest imports is OK. + if p.f.Name.Name == otherFile.Name.Name { + addGlobals(otherFile, globals) + } + p.candidates = append(p.candidates, collectImports(otherFile)...) + } + + // Resolve all the import paths we've seen to package names, and store + // f's imports by the identifier they introduce. 
+ imports := collectImports(p.f) + if p.loadRealPackageNames { + err := p.loadPackageNames(append(imports, p.candidates...)) + if err != nil { + if p.env.Logf != nil { + p.env.Logf("loading package names: %v", err) + } + return nil, false + } + } + for _, imp := range imports { + p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp) + } + + // Find missing references. + for left, rights := range p.allRefs { + if globals[left] { + continue + } + _, ok := p.existingImports[left] + if !ok { + p.missingRefs[left] = rights + continue + } + } + if len(p.missingRefs) != 0 { + return nil, false + } + + return p.fix() +} + +// fix attempts to satisfy missing imports using p.candidates. If it finds +// everything, or if p.lastTry is true, it updates fixes to add the imports it found, +// delete anything unused, and update import names, and returns true. +func (p *pass) fix() ([]*ImportFix, bool) { + // Find missing imports. + var selected []*ImportInfo + for left, rights := range p.missingRefs { + if imp := p.findMissingImport(left, rights); imp != nil { + selected = append(selected, imp) + } + } + + if !p.lastTry && len(selected) != len(p.missingRefs) { + return nil, false + } + + // Found everything, or giving up. Add the new imports and remove any unused. + var fixes []*ImportFix + for _, identifierImports := range p.existingImports { + for _, imp := range identifierImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } + + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } + } + } + // Collecting fixes involved map iteration, so sort for stability. See + // golang/go#59976. + sortFixes(fixes) + + // collect selected fixes in a separate slice, so that it can be sorted + // separately. Note that these fixes must occur after fixes to existing + // imports. TODO(rfindley): figure out why. + var selectedFixes []*ImportFix + for _, imp := range selected { + selectedFixes = append(selectedFixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: AddImport, + }) + } + sortFixes(selectedFixes) + + return append(fixes, selectedFixes...), true +} + +func sortFixes(fixes []*ImportFix) { + sort.Slice(fixes, func(i, j int) bool { + fi, fj := fixes[i], fixes[j] + if fi.StmtInfo.ImportPath != fj.StmtInfo.ImportPath { + return fi.StmtInfo.ImportPath < fj.StmtInfo.ImportPath + } + if fi.StmtInfo.Name != fj.StmtInfo.Name { + return fi.StmtInfo.Name < fj.StmtInfo.Name + } + if fi.IdentName != fj.IdentName { + return fi.IdentName < fj.IdentName + } + return fi.FixType < fj.FixType + }) +} + +// importSpecName gets the import name of imp in the import spec. +// +// When the import identifier matches the assumed import name, the import name does +// not appear in the import spec. 
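+//
+// For example (an illustrative sketch): an import whose real package name
+// matches the assumed name derived from its path is written without a name,
+// while a package whose real name differs keeps its identifier.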
+func (p *pass) importSpecName(imp *ImportInfo) string { + // If we did not load the real package names, or the name is already set, + // we just return the existing name. + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name + } + + ident := p.importIdentifier(imp) + if ident == ImportPathToAssumedName(imp.ImportPath) { + return "" // ident not needed since the assumed and real names are the same. + } + return ident +} + +// apply will perform the fixes on f in order. +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { + for _, fix := range fixes { + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: + // Find the matching import path and change the name. + for _, spec := range f.Imports { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { + spec.Name = &ast.Ident{ + Name: fix.StmtInfo.Name, + NamePos: spec.Pos(), + } + } + } + } + } +} + +// assumeSiblingImportsValid assumes that siblings' use of packages is valid, +// adding the exports they use. +func (p *pass) assumeSiblingImportsValid() { + for _, f := range p.otherFiles { + refs := collectReferences(f) + imports := collectImports(f) + importsByName := map[string]*ImportInfo{} + for _, imp := range imports { + importsByName[p.importIdentifier(imp)] = imp + } + for left, rights := range refs { + if imp, ok := importsByName[left]; ok { + if m, ok := stdlib.PackageSymbols[imp.ImportPath]; ok { + // We have the stdlib in memory; no need to guess. + rights = symbolNameSet(m) + } + p.addCandidate(imp, &packageInfo{ + // no name; we already know it. + exports: rights, + }) + } + } + } +} + +// addCandidate adds a candidate import to p, and merges in the information +// in pkg. +func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { + p.candidates = append(p.candidates, imp) + if existing, ok := p.knownPackages[imp.ImportPath]; ok { + if existing.name == "" { + existing.name = pkg.name + } + for export := range pkg.exports { + existing.exports[export] = true + } + } else { + p.knownPackages[imp.ImportPath] = pkg + } +} + +// fixImports adds and removes imports from f so that all its references are +// satisfied and there are no unused imports. +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. +var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { + fixes, err := getFixes(context.Background(), fset, f, filename, env) + if err != nil { + return err + } + apply(fset, f, fixes) + return err +} + +// getFixes gets the import fixes that need to be made to f in order to fix the imports. +// It does not modify the ast. +func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if env.Logf != nil { + env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + // First pass: looking only at f, and using the naive algorithm to + // derive package names from import paths, see if the file is already + // complete. We can't add any imports yet, because we don't know + // if missing references are actually package vars. 
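+	// (Editor's note: e.g. a bare reference "foo.Bar" could be selecting from
+	// a package foo or from a package-level variable foo declared in a sibling
+	// file; only the later passes, which read sibling files, can tell.)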
+ p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} + if fixes, done := p.load(); done { + return fixes, nil + } + + otherFiles := parseOtherFiles(fset, srcDir, filename) + + // Second pass: add information from other files in the same package, + // like their package vars and imports. + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + // Now we can try adding imports from the stdlib. + p.assumeSiblingImportsValid() + addStdlibCandidates(p, p.missingRefs) + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Third pass: get real package names where we had previously used + // the naive algorithm. + p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p.loadRealPackageNames = true + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + if err := addStdlibCandidates(p, p.missingRefs); err != nil { + return nil, err + } + p.assumeSiblingImportsValid() + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Go look for candidates in $GOPATH, etc. We don't necessarily load + // the real exports of sibling imports, so keep assuming their contents. + if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil { + return nil, err + } + + p.lastTry = true + fixes, _ := p.fix() + return fixes, nil +} + +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 + +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. +func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } + goenv, err := env.goEnv() + if err != nil { + return err + } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + + // Start off with the standard library. + for importPath, symbols := range stdlib.PackageSymbols { + p := &pkg{ + dir: filepath.Join(goenv["GOROOT"], "src", importPath), + importPathShort: importPath, + packageName: path.Base(importPath), + relevance: MaxRelevance, + } + dupCheck[importPath] = struct{}{} + if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { + var exports []stdlib.Symbol + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Func, stdlib.Type, stdlib.Var, stdlib.Const: + exports = append(exports, sym) + } + } + wrappedCallback.exportsLoaded(p, exports) + } + } + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { + // If we're an x_test, load the package under test's test variant. 
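+			// (Editor's note: for a file in package "mypkg_test", the exports of
+			// "mypkg" in the same directory are re-read with tests included, since
+			// identifiers from mypkg's own _test.go files are visible to it.)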
+ if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, + } + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) +} + +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } + for _, path := range paths { + result[path] = resolver.scoreImportPath(ctx, path) + } + return result, nil +} + +func PrimeCache(ctx context.Context, resolver Resolver) error { + // Fully scan the disk for directories, but don't actually read any Go files. + callback := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // See getCandidatePkgs: walking GOROOT is apparently expensive and + // unnecessary. + return root.Type != gopathwalk.RootGOROOT + }, + dirFound: func(pkg *pkg) bool { + return false + }, + // packageNameLoaded and exportsLoaded must never be called. + } + + return resolver.scan(ctx, callback) +} + +func candidateImportName(pkg *pkg) string { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { + return pkg.packageName + } + return "" +} + +// GetAllCandidates calls wrapped for each package whose name starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +// +// Beware that the wrapped function may be called multiple times concurrently. +// TODO(adonovan): encapsulate the concurrency. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. + return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// GetImportPaths calls wrapped for each package whose import path starts with +// searchPrefix, and can be imported from filename with the package name filePkg. 
+func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
+	callback := &scanCallback{
+		rootFound: func(gopathwalk.Root) bool {
+			return true
+		},
+		dirFound: func(pkg *pkg) bool {
+			if !canUse(filename, pkg.dir) {
+				return false
+			}
+			return strings.HasPrefix(pkg.importPathShort, searchPrefix)
+		},
+		packageNameLoaded: func(pkg *pkg) bool {
+			wrapped(ImportFix{
+				StmtInfo: ImportInfo{
+					ImportPath: pkg.importPathShort,
+					Name:       candidateImportName(pkg),
+				},
+				IdentName: pkg.packageName,
+				FixType:   AddImport,
+				Relevance: pkg.relevance,
+			})
+			return false
+		},
+	}
+	return getCandidatePkgs(ctx, callback, filename, filePkg, env)
+}
+
+// A PackageExport is a package and its exports.
+type PackageExport struct {
+	Fix     *ImportFix
+	Exports []stdlib.Symbol
+}
+
+// GetPackageExports returns all known packages with name searchPkg and their exports.
+func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error {
+	callback := &scanCallback{
+		rootFound: func(gopathwalk.Root) bool {
+			return true
+		},
+		dirFound: func(pkg *pkg) bool {
+			return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
+		},
+		packageNameLoaded: func(pkg *pkg) bool {
+			return pkg.packageName == searchPkg
+		},
+		exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) {
+			sortSymbols(exports)
+			wrapped(PackageExport{
+				Fix: &ImportFix{
+					StmtInfo: ImportInfo{
+						ImportPath: pkg.importPathShort,
+						Name:       candidateImportName(pkg),
+					},
+					IdentName: pkg.packageName,
+					FixType:   AddImport,
+					Relevance: pkg.relevance,
+				},
+				Exports: exports,
+			})
+		},
+	}
+	return getCandidatePkgs(ctx, callback, filename, filePkg, env)
+}
+
+// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate
+// imports when doing cross-platform development.
+var requiredGoEnvVars = []string{
+	"GO111MODULE",
+	"GOFLAGS",
+	"GOINSECURE",
+	"GOMOD",
+	"GOMODCACHE",
+	"GONOPROXY",
+	"GONOSUMDB",
+	"GOPATH",
+	"GOPROXY",
+	"GOROOT",
+	"GOSUMDB",
+	"GOWORK",
+}
+
+// ProcessEnv contains environment variables and settings that affect the use of
+// the go command, the go/build package, etc.
+//
+// ...a ProcessEnv *also* overwrites its Env along with derived state in the
+// form of the resolver. And because it is lazily initialized, an env may just
+// be broken and unusable, but there is no way for the caller to detect that:
+// all queries will just fail.
+//
+// TODO(rfindley): refactor this package so that this type (perhaps renamed to
+// just Env or Config) is an immutable configuration struct, to be exchanged
+// for an initialized object via a constructor that returns an error. Perhaps
+// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where
+// resolver is a concrete type used for resolving imports. Via this
+// refactoring, we can avoid the need to call ProcessEnv.init and
+// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where these
+// are misused. Also, we'd delegate to the caller the decision of how to
+// handle a broken environment.
+type ProcessEnv struct {
+	GocmdRunner *gocommand.Runner
+
+	BuildFlags []string
+	ModFlag    string
+
+	// SkipPathInScan returns true if the path should be skipped from scans of
+	// the RootCurrentModule root type. The function argument is a clean,
+	// absolute path.
+	SkipPathInScan func(string) bool
+
+	// Env overrides the OS environment, and can be used to specify
+	// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
+	// exec.Command will not honor it.
+	// Specifying all of requiredGoEnvVars avoids a call to `go env`.
+	Env map[string]string
+
+	WorkingDir string
+
+	// If Logf is non-nil, debug logging is enabled through this function.
+	Logf func(format string, args ...interface{})
+
+	// If set, ModCache holds a shared cache of directory info to use across
+	// multiple ProcessEnvs.
+	ModCache *DirInfoCache
+
+	initialized bool // see TODO above
+
+	// resolver and resolverErr are lazily evaluated (see GetResolver).
+	// This is unclean, but see the big TODO in the docstring for ProcessEnv
+	// above: for now, we can't be sure that the ProcessEnv is fully initialized.
+	resolver    Resolver
+	resolverErr error
+}
+
+func (e *ProcessEnv) goEnv() (map[string]string, error) {
+	if err := e.init(); err != nil {
+		return nil, err
+	}
+	return e.Env, nil
+}
+
+func (e *ProcessEnv) matchFile(dir, name string) (bool, error) {
+	bctx, err := e.buildContext()
+	if err != nil {
+		return false, err
+	}
+	return bctx.MatchFile(dir, name)
+}
+
+// CopyConfig copies the env's configuration into a new env.
+func (e *ProcessEnv) CopyConfig() *ProcessEnv {
+	copy := &ProcessEnv{
+		GocmdRunner: e.GocmdRunner,
+		initialized: e.initialized,
+		BuildFlags:  e.BuildFlags,
+		Logf:        e.Logf,
+		WorkingDir:  e.WorkingDir,
+		resolver:    nil,
+		Env:         map[string]string{},
+	}
+	for k, v := range e.Env {
+		copy.Env[k] = v
+	}
+	return copy
+}
+
+func (e *ProcessEnv) init() error {
+	if e.initialized {
+		return nil
+	}
+
+	foundAllRequired := true
+	for _, k := range requiredGoEnvVars {
+		if _, ok := e.Env[k]; !ok {
+			foundAllRequired = false
+			break
+		}
+	}
+	if foundAllRequired {
+		e.initialized = true
+		return nil
+	}
+
+	if e.Env == nil {
+		e.Env = map[string]string{}
+	}
+
+	goEnv := map[string]string{}
+	stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, requiredGoEnvVars...)...)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil {
+		return err
+	}
+	for k, v := range goEnv {
+		e.Env[k] = v
+	}
+	e.initialized = true
+	return nil
+}
+
+func (e *ProcessEnv) env() []string {
+	var env []string // the gocommand package will prepend os.Environ.
+	for k, v := range e.Env {
+		env = append(env, k+"="+v)
+	}
+	return env
+}
+
+func (e *ProcessEnv) GetResolver() (Resolver, error) {
+	if err := e.init(); err != nil {
+		return nil, err
+	}
+
+	if e.resolver == nil && e.resolverErr == nil {
+		// TODO(rfindley): we should only use a gopathResolver here if the working
+		// directory is actually *in* GOPATH. (I seem to recall an open gopls issue
+		// for this behavior, but I can't find it).
+		//
+		// For gopls, we can optionally explicitly choose a resolver type, since we
+		// already know the view type.
+		if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
+			e.resolver = newGopathResolver(e)
+		} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
+			e.resolverErr = err
+		} else {
+			e.resolver = Resolver(r)
+		}
+	}
+
+	return e.resolver, e.resolverErr
+}
+
+// buildContext returns the build.Context to use for matching files.
+//
+// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
+// development.
+func (e *ProcessEnv) buildContext() (*build.Context, error) {
+	ctx := build.Default
+	goenv, err := e.goEnv()
+	if err != nil {
+		return nil, err
+	}
+	ctx.GOROOT = goenv["GOROOT"]
+	ctx.GOPATH = goenv["GOPATH"]
+
+	// As of Go 1.14, build.Context has a Dir field
+	// (see golang.org/issue/34860).
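+	// (Editor's note: the reflection below is presumably so this package still
+	// compiles against toolchains whose build.Context predates the Dir field.)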
+	// Populate it only if present.
+	rc := reflect.ValueOf(&ctx).Elem()
+	dir := rc.FieldByName("Dir")
+	if dir.IsValid() && dir.Kind() == reflect.String {
+		dir.SetString(e.WorkingDir)
+	}
+
+	// Since Go 1.11, go/build.Context.Import may invoke 'go list' depending on
+	// the value in GO111MODULE in the process's environment. We always want to
+	// run in GOPATH mode when calling Import, so we need to prevent this from
+	// happening. In Go 1.16, GO111MODULE defaults to "on", so this problem comes
+	// up more frequently.
+	//
+	// HACK: setting any of the Context I/O hooks prevents Import from invoking
+	// 'go list', regardless of GO111MODULE. This is undocumented, but it's
+	// unlikely to change before GOPATH support is removed.
+	ctx.ReadDir = ioutil.ReadDir
+
+	return &ctx, nil
+}
+
+func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) {
+	inv := gocommand.Invocation{
+		Verb:       verb,
+		Args:       args,
+		BuildFlags: e.BuildFlags,
+		Env:        e.env(),
+		Logf:       e.Logf,
+		WorkingDir: e.WorkingDir,
+	}
+	return e.GocmdRunner.Run(ctx, inv)
+}
+
+func addStdlibCandidates(pass *pass, refs references) error {
+	goenv, err := pass.env.goEnv()
+	if err != nil {
+		return err
+	}
+	localbase := func(nm string) string {
+		ans := path.Base(nm)
+		if ans[0] == 'v' {
+			// this is called, for instance, with math/rand/v2 and returns rand/v2
+			if _, err := strconv.Atoi(ans[1:]); err == nil {
+				ix := strings.LastIndex(nm, ans)
+				more := path.Base(nm[:ix])
+				ans = path.Join(more, ans)
+			}
+		}
+		return ans
+	}
+	add := func(pkg string) {
+		// Prevent self-imports.
+		if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
+			return
+		}
+		exports := symbolNameSet(stdlib.PackageSymbols[pkg])
+		pass.addCandidate(
+			&ImportInfo{ImportPath: pkg},
+			&packageInfo{name: localbase(pkg), exports: exports})
+	}
+	for left := range refs {
+		if left == "rand" {
+			// Make sure we try crypto/rand before any version of math/rand, as both
+			// have Int() and our policy is to recommend crypto.
+			add("crypto/rand")
+			// If the user is on go1.21 or earlier, this should be "math/rand",
+			// but we have no way of figuring out what the user is using.
+			// TODO: investigate using the toolchain version to disambiguate in the stdlib
+			add("math/rand/v2")
+			continue
+		}
+		for importPath := range stdlib.PackageSymbols {
+			if path.Base(importPath) == left {
+				add(importPath)
+			}
+		}
+	}
+	return nil
+}
+
+// A Resolver does the build-system-specific parts of goimports.
+type Resolver interface {
+	// loadPackageNames loads the package names in importPaths.
+	loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
+
+	// scan works with callback to search for packages. See scanCallback for details.
+	scan(ctx context.Context, callback *scanCallback) error
+
+	// loadExports returns the set of exported symbols in the package at dir.
+	// loadExports may be called concurrently.
+	loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
+
+	// scoreImportPath returns the relevance for an import path.
+	scoreImportPath(ctx context.Context, path string) float64
+
+	// ClearForNewScan returns a new Resolver based on the receiver that has
+	// cleared its internal caches of directory contents.
+	//
+	// The new resolver should be primed and then set via
+	// [ProcessEnv.UpdateResolver].
+	ClearForNewScan() Resolver
+}
+
+// A scanCallback controls a call to scan and receives its results.
+// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) +} + +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := pass.env.GetResolver() + if err != nil { + return err + } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } + + // Search for imports matching potential package references. + type result struct { + imp *ImportInfo + pkg *packageInfo + } + results := make(chan result, len(refs)) + + ctx, cancel := context.WithCancel(context.TODO()) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + var ( + firstErr error + firstErrOnce sync.Once + ) + for pkgName, symbols := range refs { + wg.Add(1) + go func(pkgName string, symbols map[string]bool) { + defer wg.Done() + + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) + + if err != nil { + firstErrOnce.Do(func() { + firstErr = err + cancel() + }) + return + } + + if found == nil { + return // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + + pkg := &packageInfo{ + name: pkgName, + exports: symbols, + } + results <- result{imp, pkg} + }(pkgName, symbols) + } + go func() { + wg.Wait() + close(results) + }() + + for result := range results { + // Don't offer completions that would shadow predeclared + // names, such as github.com/coreos/etcd/error. + if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + // Ideally we would skip this candidate only + // if the predeclared name is actually + // referenced by the file, but that's a lot + // trickier to compute and would still create + // an import that is likely to surprise the + // user before long. 
+ continue + } + pass.addCandidate(result.imp, result.pkg) + } + return firstErr +} + +// notIdentifier reports whether ch is an invalid identifier character. +func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// ImportPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. +func ImportPathToAssumedName(importPath string) string { + base := path.Base(importPath) + if strings.HasPrefix(base, "v") { + if _, err := strconv.Atoi(base[1:]); err == nil { + dir := path.Dir(importPath) + if dir != "." { + base = path.Base(dir) + } + } + } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } + return base +} + +// gopathResolver implements resolver for GOPATH workspaces. +type gopathResolver struct { + env *ProcessEnv + walked bool + cache *DirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. +} + +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: NewDirInfoCache(), + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *gopathResolver) ClearForNewScan() Resolver { + return newGopathResolver(r.env) +} + +func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + bctx, err := r.env.buildContext() + if err != nil { + return nil, err + } + for _, path := range importPaths { + names[path] = importPathToName(bctx, path, srcDir) + } + return names, nil +} + +// importPathToName finds out the actual package name, as declared in its .go files. +func importPathToName(bctx *build.Context, importPath, srcDir string) string { + // Fast path for standard library without going to disk. + if stdlib.HasPackage(importPath) { + return path.Base(importPath) // stdlib packages always match their paths. + } + + buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "" + } + pkgName, err := packageDirToName(buildPkg.Dir) + if err != nil { + return "" + } + return pkgName +} + +// packageDirToName is a faster version of build.Import if +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, +// trusting that the files in the directory are consistent. 
+func packageDirToName(dir string) (packageName string, err error) { + d, err := os.Open(dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. +} + +type pkgDistance struct { + pkg *pkg + distance int // relative distance to target +} + +// byDistanceOrImportPathShortLength sorts by relative distance breaking ties +// on the short import path length and then the import string itself. +type byDistanceOrImportPathShortLength []pkgDistance + +func (s byDistanceOrImportPathShortLength) Len() int { return len(s) } +func (s byDistanceOrImportPathShortLength) Less(i, j int) bool { + di, dj := s[i].distance, s[j].distance + if di == -1 { + return false + } + if dj == -1 { + return true + } + if di != dj { + return di < dj + } + + vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort + if len(vi) != len(vj) { + return len(vi) < len(vj) + } + return vi < vj +} +func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func distance(basepath, targetpath string) int { + p, err := filepath.Rel(basepath, targetpath) + if err != nil { + return -1 + } + if p == "." { + return 0 + } + return strings.Count(p, string(filepath.Separator)) + 1 +} + +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { + add := func(root gopathwalk.Root, dir string) { + // We assume cached directories have not changed. We can skip them and their + // children. + if _, ok := r.cache.Load(dir); ok { + return + } + + importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) + info := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: VendorlessPath(importpath), + } + r.cache.Store(dir, info) + } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + + p := &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + relevance: MaxRelevance - 1, + } + if info.rootType == gopathwalk.RootGOROOT { + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) + } + } + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + var roots []gopathwalk.Root + roots = append(roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "src"), Type: gopathwalk.RootGOROOT}) + for _, p := range filepath.SplitList(goenv["GOPATH"]) { + roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "src"), Type: gopathwalk.RootGOPATH}) + } + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots = filterRoots(roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { + if stdlib.HasPackage(path) { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { + var result []gopathwalk.Root + for _, root := range roots { + if !include(root) { + continue + } + result = append(result, root) + } + return result +} + +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { + return r.cache.CacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []stdlib.Symbol, error) { + // Look for non-test, buildable .go files which could provide exports. 
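+	// (Editor's note: "buildable" is decided by env.matchFile below, i.e.
+	// build.Context.MatchFile, which applies file-name and build-constraint
+	// rules for the configured GOOS/GOARCH.)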
+	all, err := os.ReadDir(dir)
+	if err != nil {
+		return "", nil, err
+	}
+	var files []fs.DirEntry
+	for _, fi := range all {
+		name := fi.Name()
+		if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) {
+			continue
+		}
+		match, err := env.matchFile(dir, fi.Name())
+		if err != nil || !match {
+			continue
+		}
+		files = append(files, fi)
+	}
+
+	if len(files) == 0 {
+		return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir)
+	}
+
+	var pkgName string
+	var exports []stdlib.Symbol
+	fset := token.NewFileSet()
+	for _, fi := range files {
+		select {
+		case <-ctx.Done():
+			return "", nil, ctx.Err()
+		default:
+		}
+
+		fullFile := filepath.Join(dir, fi.Name())
+		f, err := parser.ParseFile(fset, fullFile, nil, 0)
+		if err != nil {
+			if env.Logf != nil {
+				env.Logf("error parsing %v: %v", fullFile, err)
+			}
+			continue
+		}
+		if f.Name.Name == "documentation" {
+			// Special case from go/build.ImportDir, not
+			// handled by MatchFile above.
+			continue
+		}
+		if includeTest && strings.HasSuffix(f.Name.Name, "_test") {
+			// x_test package. We want internal test files only.
+			continue
+		}
+		pkgName = f.Name.Name
+		for name, obj := range f.Scope.Objects {
+			if ast.IsExported(name) {
+				var kind stdlib.Kind
+				switch obj.Kind {
+				case ast.Con:
+					kind = stdlib.Const
+				case ast.Typ:
+					kind = stdlib.Type
+				case ast.Var:
+					kind = stdlib.Var
+				case ast.Fun:
+					kind = stdlib.Func
+				}
+				exports = append(exports, stdlib.Symbol{
+					Name:    name,
+					Kind:    kind,
+					Version: 0, // unknown; be permissive
+				})
+			}
+		}
+	}
+	sortSymbols(exports)
+
+	if env.Logf != nil {
+		env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
+	}
+	return pkgName, exports, nil
+}
+
+func sortSymbols(syms []stdlib.Symbol) {
+	sort.Slice(syms, func(i, j int) bool {
+		return syms[i].Name < syms[j].Name
+	})
+}
+
+// findImport searches for a package with the given symbols.
+// If no package is found, findImport returns (nil, nil).
+func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
+	// Sort the candidates by their import package length,
+	// assuming that shorter package names are better than long
+	// ones. Note that this sorts by the de-vendored name, so
+	// there's no "penalty" for vendoring.
+	sort.Sort(byDistanceOrImportPathShortLength(candidates))
+	if pass.env.Logf != nil {
+		for i, c := range candidates {
+			pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
+		}
+	}
+	resolver, err := pass.env.GetResolver()
+	if err != nil {
+		return nil, err
+	}
+
+	// Collect exports for packages with matching names.
+	rescv := make([]chan *pkg, len(candidates))
+	for i := range candidates {
+		rescv[i] = make(chan *pkg, 1)
+	}
+	const maxConcurrentPackageImport = 4
+	loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
+
+	ctx, cancel := context.WithCancel(ctx)
+	var wg sync.WaitGroup
+	defer func() {
+		cancel()
+		wg.Wait()
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for i, c := range candidates {
+			select {
+			case loadExportsSem <- struct{}{}:
+			case <-ctx.Done():
+				return
+			}
+
+			wg.Add(1)
+			go func(c pkgDistance, resc chan<- *pkg) {
+				defer func() {
+					<-loadExportsSem
+					wg.Done()
+				}()
+
+				if pass.env.Logf != nil {
+					pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
+				}
+				// If we're an x_test, load the package under test's test variant.
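+				// (Editor's note: this mirrors the x_test handling in getCandidatePkgs
+				// above, and applies only to the directory of the file being fixed.)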
+				includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
+				_, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
+				if err != nil {
+					if pass.env.Logf != nil {
+						pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
+					}
+					resc <- nil
+					return
+				}
+
+				exportsMap := make(map[string]bool, len(exports))
+				for _, sym := range exports {
+					exportsMap[sym.Name] = true
+				}
+
+				// If it doesn't have the right
+				// symbols, send nil to mean no match.
+				for symbol := range symbols {
+					if !exportsMap[symbol] {
+						resc <- nil
+						return
+					}
+				}
+				resc <- c.pkg
+			}(c, rescv[i])
+		}
+	}()
+
+	for _, resc := range rescv {
+		pkg := <-resc
+		if pkg == nil {
+			continue
+		}
+		return pkg, nil
+	}
+	return nil, nil
+}
+
+// pkgIsCandidate reports whether pkg is a candidate for satisfying one of
+// the package references in refs made by the file named by filename.
+//
+// This check is purely lexical and is meant to be as fast as possible
+// because it's run over all $GOPATH directories to filter out poor
+// candidates in order to limit the CPU and I/O later parsing the
+// exports in candidate packages.
+//
+// filename is the file being formatted. Each pkgIdent in refs is a
+// package being searched for, like "client" (if searching for
+// "client.New").
+func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
+	// Check "internal" and "vendor" visibility:
+	if !canUse(filename, pkg.dir) {
+		return false
+	}
+
+	// Speed optimization to minimize disk I/O:
+	// the last two components on disk must contain the
+	// package name somewhere.
+	//
+	// This permits mismatch naming like directory
+	// "go-foo" being package "foo", or "pkg.v3" being "pkg",
+	// or directory "google.golang.org/api/cloudbilling/v1"
+	// being package "cloudbilling", but doesn't
+	// permit a directory "foo" to be package
+	// "bar", which is strongly discouraged
+	// anyway. There's no reason goimports needs
+	// to be slow just to accommodate that.
+	for pkgIdent := range refs {
+		lastTwo := lastTwoComponents(pkg.importPathShort)
+		if strings.Contains(lastTwo, pkgIdent) {
+			return true
+		}
+		if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
+			lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
+			if strings.Contains(lastTwo, pkgIdent) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func hasHyphenOrUpperASCII(s string) bool {
+	for i := 0; i < len(s); i++ {
+		b := s[i]
+		if b == '-' || ('A' <= b && b <= 'Z') {
+			return true
+		}
+	}
+	return false
+}
+
+func lowerASCIIAndRemoveHyphen(s string) (ret string) {
+	buf := make([]byte, 0, len(s))
+	for i := 0; i < len(s); i++ {
+		b := s[i]
+		switch {
+		case b == '-':
+			continue
+		case 'A' <= b && b <= 'Z':
+			buf = append(buf, b+('a'-'A'))
+		default:
+			buf = append(buf, b)
+		}
+	}
+	return string(buf)
+}
+
+// canUse reports whether the package in dir is usable from filename,
+// respecting the Go "internal" and "vendor" visibility rules.
+func canUse(filename, dir string) bool {
+	// Fast path check, before any allocations. If it doesn't contain vendor
+	// or internal, it's not tricky:
+	// Note that this can false-negative on directories like "notinternal",
+	// but we check it correctly below. This is just a fast path.
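+	// (Editor's illustration: a dir like ".../notinternal/x" contains the
+	// substring "internal", so it skips this fast path and is instead vetted
+	// by the segment-aware checks below, which it passes.)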
+ if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. + absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func symbolNameSet(symbols []stdlib.Symbol) map[string]bool { + names := make(map[string]bool) + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Const, stdlib.Var, stdlib.Type, stdlib.Func: + names[sym.Name] = true + } + } + return names +} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go new file mode 100644 index 00000000..f8346552 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -0,0 +1,354 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports + +import ( + "bufio" + "bytes" + "context" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" +) + +// Options is golang.org/x/tools/imports.Options with extra internal-only options. +type Options struct { + Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. 
+	LocalPrefix string
+
+	Fragment  bool // Accept fragment of a source file (no package statement)
+	AllErrors bool // Report all errors (not just the first 10 on different lines)
+
+	Comments  bool // Print comments (true if nil *Options provided)
+	TabIndent bool // Use tabs for indent (true if nil *Options provided)
+	TabWidth  int  // Tab width (8 if nil *Options provided)
+
+	FormatOnly bool // Disable the insertion and deletion of imports
+}
+
+// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
+func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
+	fileSet := token.NewFileSet()
+	file, adjust, err := parse(fileSet, filename, src, opt)
+	if err != nil {
+		return nil, err
+	}
+
+	if !opt.FormatOnly {
+		if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
+			return nil, err
+		}
+	}
+	return formatFile(fileSet, file, src, adjust, opt)
+}
+
+// FixImports returns a list of fixes to the imports that, when applied,
+// will leave the imports in the same state as Process. src and opt must
+// be specified.
+//
+// Note that filename's directory influences which imports can be chosen,
+// so it is important that filename be accurate.
+func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+	ctx, done := event.Start(ctx, "imports.FixImports")
+	defer done()
+
+	fileSet := token.NewFileSet()
+	file, _, err := parse(fileSet, filename, src, opt)
+	if err != nil {
+		return nil, err
+	}
+
+	return getFixes(ctx, fileSet, file, filename, opt.Env)
+}
+
+// ApplyFixes applies all of the fixes to the file and formats it. extraMode
+// is added in when parsing the file. src and opt must be specified, but no
+// env is needed.
+func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
+	// Don't use parse() -- we don't care about fragments or statement lists
+	// here, and we need to work with unparseable files.
+	fileSet := token.NewFileSet()
+	parserMode := parser.Mode(0)
+	if opt.Comments {
+		parserMode |= parser.ParseComments
+	}
+	if opt.AllErrors {
+		parserMode |= parser.AllErrors
+	}
+	parserMode |= extraMode
+
+	file, err := parser.ParseFile(fileSet, filename, src, parserMode)
+	if file == nil {
+		return nil, err
+	}
+
+	// Apply the fixes to the file.
+	apply(fileSet, file, fixes)
+
+	return formatFile(fileSet, file, src, nil, opt)
+}
+
+// formatFile formats the file syntax tree.
+// It may mutate the token.FileSet and the ast.File.
+//
+// If an adjust function is provided, it is called after formatting
+// with the original source (formatFile's src parameter) and the
+// formatted file, and returns the postprocessed result.
+func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
+	mergeImports(file)
+	sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
+	var spacesBefore []string // import paths we need spaces before
+	for _, impSection := range astutil.Imports(fset, file) {
+		// Within each block of contiguous imports, see if any
+		// import lines are in different group numbers. If so,
+		// we'll need to put a space between them so it's
+		// compatible with gofmt.
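+		// (Editor's illustration: with LocalPrefix "example.com/me", "fmt" and
+		// "example.com/me/util" land in different groups, so a blank line is
+		// recorded between them to match gofmt's import grouping.)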
+ lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(opt.LocalPrefix, importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err := printConfig.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out, err = addImportSpaces(bytes.NewReader(out), spacesBefore) + if err != nil { + return nil, err + } + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. +func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that parse errors are on + // the correct line. + const prefix = "package main;" + psrc := append([]byte(prefix), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // Gofmt will turn the ; into a \n. + // Do that ourselves now and update the file contents, + // so that positions and line numbers are correct going forward. + psrc[len(prefix)-1] = '\n' + fset.File(file.Package).SetLinesForContent(psrc) + + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + src = src[len(prefix):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. 
+ // Remove that indent. + src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n")) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. + return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2. matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3. matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. +func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`) + +func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) { + var out bytes.Buffer + in := bufio.NewReader(r) + inImports := false + done := false + for { + s, err := in.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprint(&out, s) + } + return out.Bytes(), nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go new file mode 100644 index 00000000..82fe644a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -0,0 +1,841 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" +) + +// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning +// as fast as possible, which is desirable for a call to goimports from the +// command line, but it doesn't work as well for gopls, where it suffers from +// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216), +// both caused by populating the cache, albeit in slightly different ways. +// +// A high level list of TODOs: +// - Optimize the scan itself, as there is some redundancy statting and +// reading go.mod files. +// - Invert the relationship between ProcessEnv and Resolver (see the +// docstring of ProcessEnv). +// - Make it easier to use an external resolver implementation. +// +// Smaller TODOs are annotated in the code below. + +// ModuleResolver implements the Resolver interface for a workspace using +// modules. +// +// A goal of the ModuleResolver is to invoke the Go command as little as +// possible. To this end, it runs the Go command only for listing module +// information (i.e. `go list -m -e -json ...`). Package scanning, the process +// of loading package information for the modules, is implemented internally +// via the scan method. +// +// It has two types of state: the state derived from the go command, which +// is populated by init, and the state derived from scans, which is populated +// via scan. A root is considered scanned if it has been walked to discover +// directories. However, if the scan did not require additional information +// from the directory (such as package name or exports), the directory +// information itself may be partially populated. It will be lazily filled in +// as needed by scans, using the scanCallback. +type ModuleResolver struct { + env *ProcessEnv + + // Module state, populated during construction + dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory + moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset + roots []gopathwalk.Root // roots to scan, in approximate order of importance + mains []*gocommand.ModuleJSON // main modules + mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots + modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path + modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir. + + // Scanning state, populated by scan + + // scanSema prevents concurrent scans, and guards scannedRoots and the cache + // fields below (though the caches themselves are concurrency safe). + // Receive to acquire, send to release. + scanSema chan struct{} + scannedRoots map[gopathwalk.Root]bool // if true, root has been walked + + // Caches of directory info, populated by scans and scan callbacks + // + // moduleCacheCache stores cached information about roots in the module + // cache, which are immutable and therefore do not need to be invalidated. + // + // otherCache stores information about all other roots (even GOROOT), which + // may change. + moduleCacheCache *DirInfoCache + otherCache *DirInfoCache +} + +// newModuleResolver returns a new module-aware goimports resolver. 
+// +// Note: use caution when modifying this constructor: changes must also be +// reflected in ModuleResolver.ClearForNewScan. +func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} // release + + goenv, err := r.env.goEnv() + if err != nil { + return nil, err + } + + // TODO(rfindley): can we refactor to share logic with r.env.invokeGo? + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + + vendorEnabled := false + var mainModVendor *gocommand.ModuleJSON // for module vendoring + var mainModsVendor []*gocommand.ModuleJSON // for workspace vendoring + + goWork := r.env.Env["GOWORK"] + if len(goWork) == 0 { + // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but + // they should be available from the ProcessEnv. Can we avoid the redundant + // invocation? + vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return nil, err + } + } else { + vendorEnabled, mainModsVendor, err = gocommand.WorkspaceVendorEnabled(context.Background(), inv, r.env.GocmdRunner) + if err != nil { + return nil, err + } + } + + if vendorEnabled { + if mainModVendor != nil { + // Module vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = []*gocommand.ModuleJSON{mainModVendor} + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainModVendor.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + } else { + // Workspace vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = mainModsVendor + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(filepath.Dir(goWork), "vendor"), + } + r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + } + } else { + // Vendor mode is off, so run go list -m ... to find everything. + err := r.initAllMods() + // We expect an error when running outside of a module with + // GO111MODULE=on. Other errors are fatal. 
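+		// (Editor's note: the substrings matched below track the go command's
+		// wording, which has varied across releases; both forms are treated as
+		// the benign "not in a module" case.)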
+ if err != nil { + if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { + return nil, err + } + } + } + + r.moduleCacheDir = gomodcacheForEnv(goenv) + if r.moduleCacheDir == "" { + return nil, fmt.Errorf("cannot resolve GOMODCACHE") + } + + sort.Slice(r.modsByModPath, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByModPath[x].Path, "/") + } + return count(j) < count(i) // descending order + }) + sort.Slice(r.modsByDir, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) + } + return count(j) < count(i) // descending order + }) + + r.roots = []gopathwalk.Root{} + if goenv["GOROOT"] != "" { // "" happens in tests + r.roots = append(r.roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}) + } + r.mainByDir = make(map[string]*gocommand.ModuleJSON) + for _, main := range r.mains { + r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule}) + r.mainByDir[main.Dir] = main + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther}) + } else { + addDep := func(mod *gocommand.ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough + // when we encounter it in the module cache scan. + // + // Including it at a lower index in r.roots than the module cache dir + // helps prioritize matches from within existing dependencies. + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. + for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + // If provided, share the moduleCacheCache. + // + // TODO(rfindley): The module cache is immutable. However, the loaded + // exports do depend on GOOS and GOARCH. Fortunately, the + // ProcessEnv.buildContext does not adjust these from build.DefaultContext + // (even though it should). So for now, this is OK to share, but we need to + // add logic for handling GOOS/GOARCH. + r.moduleCacheCache = moduleCacheCache + r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} + if r.moduleCacheCache == nil { + r.moduleCacheCache = NewDirInfoCache() + } + r.otherCache = NewDirInfoCache() + return r, nil +} + +// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env +// map, which must have GOMODCACHE and GOPATH populated. +// +// TODO(rfindley): this is defensive refactoring. +// 1. Is this even relevant anymore? Can't we just read GOMODCACHE. +// 2. Use this to separate module cache scanning from other scanning. 
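+//
+// A minimal sketch of the fallback behavior (hypothetical values):
+//
+//	gomodcacheForEnv(map[string]string{"GOMODCACHE": "", "GOPATH": "/home/u/go"})
+//	// == "/home/u/go/pkg/mod" (with a Unix-style GOPATH)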
+func gomodcacheForEnv(goenv map[string]string) string { + if gmc := goenv["GOMODCACHE"]; gmc != "" { + return gmc + } + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return "" + } + return filepath.Join(gopaths[0], "/pkg/mod") +} + +func (r *ModuleResolver) initAllMods() error { + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...") + if err != nil { + return err + } + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &gocommand.ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return err + } + if mod.Dir == "" { + if r.env.Logf != nil { + r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) + } + // Can't do anything with a module that's not downloaded. + continue + } + // golang/go#36193: the go command doesn't always clean paths. + mod.Dir = filepath.Clean(mod.Dir) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) + if mod.Main { + r.mains = append(r.mains, mod) + } + } + return nil +} + +// ClearForNewScan invalidates the last scan. +// +// It preserves the set of roots, but forgets about the set of directories. +// Though it forgets the set of module cache directories, it remembers their +// contents, since they are assumed to be immutable. +func (r *ModuleResolver) ClearForNewScan() Resolver { + <-r.scanSema // acquire r, to guard scannedRoots + r2 := &ModuleResolver{ + env: r.env, + dummyVendorMod: r.dummyVendorMod, + moduleCacheDir: r.moduleCacheDir, + roots: r.roots, + mains: r.mains, + mainByDir: r.mainByDir, + modsByModPath: r.modsByModPath, + + scanSema: make(chan struct{}, 1), + scannedRoots: make(map[gopathwalk.Root]bool), + otherCache: NewDirInfoCache(), + moduleCacheCache: r.moduleCacheCache, + } + r2.scanSema <- struct{}{} // r2 must start released + // Invalidate root scans. We don't need to invalidate module cache roots, + // because they are immutable. + // (We don't support a use case where GOMODCACHE is cleaned in the middle of + // e.g. a gopls session: the user must restart gopls to get accurate + // imports.) + // + // Scanning for new directories in GOMODCACHE should be handled elsewhere, + // via a call to ScanModuleCache. + for _, root := range r.roots { + if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] { + r2.scannedRoots[root] = true + } + } + r.scanSema <- struct{}{} // release r + return r2 +} + +// ClearModuleInfo invalidates resolver state that depends on go.mod file +// contents (essentially, the output of go list -m -json ...). +// +// Notably, it does not forget directory contents, which are reset +// asynchronously via ClearForNewScan. +// +// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op. +// +// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. +func (e *ProcessEnv) ClearModuleInfo() { + if r, ok := e.resolver.(*ModuleResolver); ok { + resolver, err := newModuleResolver(e, e.ModCache) + if err != nil { + e.resolver = nil + e.resolverErr = err + return + } + + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + + e.UpdateResolver(resolver) + } +} + +// UpdateResolver sets the resolver for the ProcessEnv to use in imports +// operations. Only for use with the result of [Resolver.ClearForNewScan]. 
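+//
+// The intended sequence, roughly (a sketch; r2 is hypothetical):
+//
+//	r2 := e.resolver.ClearForNewScan()
+//	// ... kick off any asynchronous rescan ...
+//	e.UpdateResolver(r2)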
+// +// TODO(rfindley): this awkward API is a result of the (arguably) inverted +// relationship between configuration and state described in the doc comment +// for [ProcessEnv]. +func (e *ProcessEnv) UpdateResolver(r Resolver) { + e.resolver = r + e.resolverErr = nil +} + +// findPackage returns the module and directory from within the main modules +// and their dependencies that contains the package at the given import path, +// or returns nil, "" if no module is in scope. +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { + // This can't find packages in the stdlib, but that's harmless for all + // the existing code paths. + for _, m := range r.modsByModPath { + if !strings.HasPrefix(importPath, m.Path) { + continue + } + pathInModule := importPath[len(m.Path):] + pkgDir := filepath.Join(m.Dir, pathInModule) + if r.dirIsNestedModule(pkgDir, m) { + continue + } + + if info, ok := r.cacheLoad(pkgDir); ok { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + if err != nil { + continue // No package in this dir. + } + return m, pkgDir + } + if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil { + continue // Dir is unreadable, etc. + } + // This is slightly wrong: a directory doesn't have to have an + // importable package to count as a package for package-to-module + // resolution. package main or _test files should count but + // don't. + // TODO(heschi): fix this. + if _, err := r.cachePackageName(info); err == nil { + return m, pkgDir + } + } + + // Not cached. Read the filesystem. + pkgFiles, err := os.ReadDir(pkgDir) + if err != nil { + continue + } + // A module only contains a package if it has buildable go + // files in that directory. If not, it could be provided by an + // outer module. See #29736. + for _, fi := range pkgFiles { + if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok { + return m, pkgDir + } + } + } + return nil, "" +} + +func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) { + if info, ok := r.moduleCacheCache.Load(dir); ok { + return info, ok + } + return r.otherCache.Load(dir) +} + +func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { + if info.rootType == gopathwalk.RootModuleCache { + r.moduleCacheCache.Store(info.dir, info) + } else { + r.otherCache.Store(info.dir, info) + } +} + +// cachePackageName caches the package name for a dir already in the cache. +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CachePackageName(info) + } + return r.otherCache.CachePackageName(info) +} + +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CacheExports(ctx, env, info) + } + return r.otherCache.CacheExports(ctx, env, info) +} + +// findModuleByDir returns the module that contains dir, or nil if no such +// module is in scope. +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { + // This is quite tricky and may not be correct. dir could be: + // - a package in the main module. + // - a replace target underneath the main module's directory. + // - a nested module in the above. + // - a replace target somewhere totally random. + // - a nested module in the above. + // - in the mod cache. + // - in /vendor/ in -mod=vendor mode. + // - nested module? Dunno. 
+ // Rumor has it that replace targets cannot contain other replace targets. + // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. + for _, m := range r.modsByDir { + if !strings.HasPrefix(dir, m.Dir) { + continue + } + + if r.dirIsNestedModule(dir, m) { + continue + } + + return m + } + return nil +} + +// dirIsNestedModule reports if dir is contained in a nested module underneath +// mod, not actually in mod. +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { + if !strings.HasPrefix(dir, mod.Dir) { + return false + } + if r.dirInModuleCache(dir) { + // Nested modules in the module cache are pruned, + // so it cannot be a nested module. + return false + } + if mod != nil && mod == r.dummyVendorMod { + // The /vendor pseudomodule is flattened and doesn't actually count. + return false + } + modDir, _ := r.modInfo(dir) + if modDir == "" { + return false + } + return modDir != mod.Dir +} + +func readModName(modFile string) string { + modBytes, err := os.ReadFile(modFile) + if err != nil { + return "" + } + return modulePath(modBytes) +} + +func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) { + if r.dirInModuleCache(dir) { + if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } + } + for { + if info, ok := r.cacheLoad(dir); ok { + return info.moduleDir, info.moduleName + } + f := filepath.Join(dir, "go.mod") + info, err := os.Stat(f) + if err == nil && !info.IsDir() { + return dir, readModName(f) + } + + d := filepath.Dir(dir) + if len(d) >= len(dir) { + return "", "" // reached top of file system, no go.mod + } + dir = d + } +} + +func (r *ModuleResolver) dirInModuleCache(dir string) bool { + if r.moduleCacheDir == "" { + return false + } + return strings.HasPrefix(dir, r.moduleCacheDir) +} + +func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + for _, path := range importPaths { + // TODO(rfindley): shouldn't this use the dirInfoCache? + _, packageDir := r.findPackage(path) + if packageDir == "" { + continue + } + name, err := packageDirToName(packageDir) + if err != nil { + continue + } + names[path] = name + } + return names, nil +} + +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() + + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + if !callback.dirFound(pkg) { + return + } + + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return + } + if !callback.packageNameLoaded(pkg) { + return + } + + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } + + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. 
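+	// (ScanAndListen first replays every entry already in the cache through
+	// processDir, then keeps forwarding entries stored during the walk until
+	// the deferred stop functions run.)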
+ stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() + + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. + skip := func(root gopathwalk.Root, dir string) bool { + if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule { + if root.Path == dir { + return false + } + + if r.env.SkipPathInScan(filepath.Clean(dir)) { + return true + } + } + + info, ok := r.cacheLoad(dir) + if !ok { + return false + } + // This directory can be skipped as long as we have already scanned it. + // Packages with errors will continue to have errors, so there is no need + // to rescan them. + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + add := func(root gopathwalk.Root, dir string) { + r.cacheStore(r.scanDirForPackage(root, dir)) + } + + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: // acquire + } + defer func() { r.scanSema <- struct{}{} }() // release + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } + + if r.scannedRoots[root] { + continue + } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) + r.scannedRoots[root] = true + } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { + if stdlib.HasPackage(path) { + return MaxRelevance + } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} + +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + relevance = MaxRelevance - 3 + case !mod.Main: + relevance = MaxRelevance - 2 + default: + relevance = MaxRelevance - 1 // main module ties with stdlib + } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } + } + + return relevance +} + +// canonicalize gets the result of canonicalizing the packages using the results +// of initializing the resolver from 'go list -m'. +func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { + // Packages in GOROOT are already canonical, regardless of the std/cmd modules. + if info.rootType == gopathwalk.RootGOROOT { + return &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + packageName: path.Base(info.nonCanonicalImportPath), + relevance: MaxRelevance, + }, nil + } + + importPath := info.nonCanonicalImportPath + mod := r.findModuleByDir(info.dir) + // Check if the directory is underneath a module that's in scope. + if mod != nil { + // It is. If dir is the target of a replace directive, + // our guessed import path is wrong. Use the real one. 
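+	// (E.g. with a hypothetical `replace example.com/m => ../m` directive,
+	// the walk found this directory under ../m, so the import path guessed
+	// from the directory layout must be swapped for mod.Path.)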
+ if mod.Dir == info.dir { + importPath = mod.Path + } else { + dirInMod := info.dir[len(mod.Dir)+len("/"):] + importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) + } + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. + return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) + } + + res := &pkg{ + importPathShort: importPath, + dir: info.dir, + relevance: modRelevance(mod), + } + // We may have discovered a package that has a different version + // in scope already. Canonicalize to that one if possible. + if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { + res.dir = canonicalDir + } + return res, nil +} + +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { + return r.cacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + importPath := filepath.ToSlash(subdir) + if strings.HasPrefix(importPath, "vendor/") { + // Only enter vendor directories if they're explicitly requested as a root. + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("unwanted vendor directory"), + } + } + switch root.Type { + case gopathwalk.RootCurrentModule: + importPath = path.Join(r.mainByDir[root.Path].Path, filepath.ToSlash(subdir)) + case gopathwalk.RootModuleCache: + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if r.env.Logf != nil { + r.env.Logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath = path.Join(modPath, filepath.ToSlash(matches[3])) + } + + modDir, modName := r.modInfo(dir) + result := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + if root.Type == gopathwalk.RootGOROOT { + // stdlib packages are always in scope, despite the confusing go.mod + return result + } + return result +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// modulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +// +// Copied from cmd/go/internal/modfile. 
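+//
+// A minimal sketch of its behavior (hypothetical input):
+//
+//	modulePath([]byte("// a comment\nmodule example.com/m // trailing\n"))
+//	// == "example.com/m"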
+func modulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go new file mode 100644 index 00000000..b1192696 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -0,0 +1,331 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "fmt" + "path" + "path/filepath" + "strings" + "sync" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" +) + +// To find packages to import, the resolver needs to know about all of +// the packages that could be imported. This includes packages that are +// already in modules that are in (1) the current module, (2) replace targets, +// and (3) packages in the module cache. Packages in (1) and (2) may change over +// time, as the client may edit the current module and locally replaced modules. +// The module cache (which includes all of the packages in (3)) can only +// ever be added to. +// +// The resolver can thus save state about packages in the module cache +// and guarantee that this will not change over time. To obtain information +// about new modules added to the module cache, the module cache should be +// rescanned. +// +// It is OK to serve information about modules that have been deleted, +// as they do still exist. +// TODO(suzmue): can we share information with the caller about +// what module needs to be downloaded to import this package? + +type directoryPackageStatus int + +const ( + _ directoryPackageStatus = iota + directoryScanned + nameLoaded + exportsLoaded +) + +// directoryPackageInfo holds (possibly incomplete) information about packages +// contained in a given directory. +type directoryPackageInfo struct { + // status indicates the extent to which this struct has been filled in. + status directoryPackageStatus + // err is non-nil when there was an error trying to reach status. + err error + + // Set when status >= directoryScanned. + + // dir is the absolute directory of this package. + dir string + rootType gopathwalk.RootType + // nonCanonicalImportPath is the package's expected import path. It may + // not actually be importable at that path. + nonCanonicalImportPath string + + // Module-related information. + moduleDir string // The directory that is the module root of this dir. + moduleName string // The module name that contains this dir. + + // Set when status >= nameLoaded. + + packageName string // the package name, as declared in the source. + + // Set when status >= exportsLoaded. + // TODO(rfindley): it's hard to see this, but exports depend implicitly on + // the default build context GOOS and GOARCH. 
+ // + // We can make this explicit, and key exports by GOOS, GOARCH. + exports []stdlib.Symbol +} + +// reachedStatus returns true when info has a status at least target and any error associated with +// an attempt to reach target. +func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) { + if info.err == nil { + return info.status >= target, nil + } + if info.status == target { + return true, info.err + } + return true, nil +} + +// DirInfoCache is a concurrency-safe map for storing information about +// directories that may contain packages. +// +// The information in this cache is built incrementally. Entries are initialized in scan. +// No new keys should be added in any other functions, as all directories containing +// packages are identified in scan. +// +// Other functions, including loadExports and findPackage, may update entries in this cache +// as they discover new things about the directory. +// +// The information in the cache is not expected to change for the cache's +// lifetime, so there is no protection against competing writes. Users should +// take care not to hold the cache across changes to the underlying files. +type DirInfoCache struct { + mu sync.Mutex + // dirs stores information about packages in directories, keyed by absolute path. + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +func NewDirInfoCache() *DirInfoCache { + return &DirInfoCache{ + dirs: make(map[string]*directoryPackageInfo), + listeners: make(map[*int]cacheListener), + } +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. + const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. + + // We can't hold mu while calling the listener. + d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop +} + +// Store stores the package info for dir. +func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) { + d.mu.Lock() + // TODO(rfindley, golang/go#59216): should we overwrite an existing entry? + // That seems incorrect as the cache should be idempotent. + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } +} + +// Load returns a copy of the directoryPackageInfo for absolute directory dir. 
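+//
+// The returned value is a copy: callers that update fields must write the
+// modified value back with Store, as CachePackageName and CacheExports do.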
+func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) { + d.mu.Lock() + defer d.mu.Unlock() + info, ok := d.dirs[dir] + if !ok { + return directoryPackageInfo{}, false + } + return *info, true +} + +// Keys returns the keys currently present in d. +func (d *DirInfoCache) Keys() (keys []string) { + d.mu.Lock() + defer d.mu.Unlock() + for key := range d.dirs { + keys = append(keys, key) + } + return keys +} + +func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + return info.packageName, err + } + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return "", fmt.Errorf("cannot read package name, scan error: %v", err) + } + info.packageName, info.err = packageDirToName(info.dir) + info.status = nameLoaded + d.Store(info.dir, info) + return info.packageName, info.err +} + +func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { + if reached, _ := info.reachedStatus(exportsLoaded); reached { + return info.packageName, info.exports, info.err + } + if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { + return "", nil, err + } + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { + return info.packageName, info.exports, info.err + } + // The cache structure wants things to proceed linearly. We can skip a + // step here, but only if we succeed. + if info.status == nameLoaded || info.err == nil { + info.status = exportsLoaded + } else { + info.status = nameLoaded + } + d.Store(info.dir, info) + return info.packageName, info.exports, info.err +} + +// ScanModuleCache walks the given directory, which must be a GOMODCACHE value, +// for directory package information, storing the results in cache. +func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) { + // Note(rfindley): it's hard to see, but this function attempts to implement + // just the side effects on cache of calling PrimeCache with a ProcessEnv + // that has the given dir as its GOMODCACHE. + // + // Teasing out the control flow, we see that we can avoid any handling of + // vendor/ and can infer module info entirely from the path, simplifying the + // logic here. + + root := gopathwalk.Root{ + Path: filepath.Clean(dir), + Type: gopathwalk.RootModuleCache, + } + + directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo { + // This is a copy of ModuleResolver.scanDirForPackage, trimmed down to + // logic that applies to a module cache directory. 
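+		// (Module cache layout, e.g. $GOMODCACHE/example.com/m@v1.2.3/pkg,
+		// encodes the module path and version in the directory name, so
+		// both are recovered from the path alone via modCacheRegexp below.)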
+ + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if logf != nil { + logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath := path.Join(modPath, filepath.ToSlash(matches[3])) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + modName := readModName(filepath.Join(modDir, "go.mod")) + return directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + } + + add := func(root gopathwalk.Root, dir string) { + info := directoryInfo(root, dir) + cache.Store(info.dir, info) + } + + skip := func(_ gopathwalk.Root, dir string) bool { + // Skip directories that have already been scanned. + // + // Note that gopathwalk only adds "package" directories, which must contain + // a .go file, and all such package directories in the module cache are + // immutable. So if we can load a dir, it can be skipped. + info, ok := cache.Load(dir) + if !ok { + return false + } + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true}) +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go new file mode 100644 index 00000000..da8194fd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -0,0 +1,297 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hacked up copy of go/ast/import.go +// Modified to use a single token.File in preference to a FileSet. + +package imports + +import ( + "go/ast" + "go/token" + "log" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data loss. +// +// It may mutate the token.File and the ast.File. +func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...) + d.Specs = specs + + // Deduping can leave a blank line before the rparen; clean that up. 
+ // Ignore line directives. + if len(d.Specs) > 0 { + lastSpec := d.Specs[len(d.Specs)-1] + lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line + if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 { + tokFile.MergeLine(rParenLine - 1) // has side effects! + } + } + } +} + +// mergeImports merges all the import declarations into the first one. +// Taken from golang.org/x/tools/ast/astutil. +// This does not adjust line numbers properly +func mergeImports(f *ast.File) { + if len(f.Decls) <= 1 { + return + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } +} + +// declImports reports whether gen contains an import of path. +// Taken from golang.org/x/tools/ast/astutil. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +func importPath(s ast.Spec) string { + t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s ast.Spec) string { + n := s.(*ast.ImportSpec).Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s ast.Spec) string { + c := s.(*ast.ImportSpec).Comment + if c == nil { + return "" + } + return c.Text() +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next ast.Spec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + return prev.(*ast.ImportSpec).Comment == nil +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +// sortSpecs sorts the import specs within each import decl. +// It may mutate the token.File. +func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Identify comments in this range. + // Any comment from pos[0].Start to the final line counts. + lastLine := tokFile.Line(pos[len(pos)-1].End) + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if tokFile.Line(g.End()) > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. 
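+	// (Comments are visited in position order; specIndex advances only while
+	// the next spec still starts at or before the comment, so each group is
+	// attached to the last spec beginning at or before it.)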
+ importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec{localPrefix, specs}) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + tokFile.MergeLine(tokFile.Line(p)) // has side effects! + } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + nextSpecPos := pos[i].End + + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + nextSpecPos = c.End() + } + } + if i < len(specs)-1 { + pos[i+1].Start = nextSpecPos + pos[i+1].End = nextSpecPos + } + } + + sort.Sort(byCommentPos(comments)) + + // Fixup comments can insert blank lines, because import specs are on different lines. + // We remove those blank lines here by merging import spec to the first import spec line. + firstSpecLine := tokFile.Line(specs[0].Pos()) + for _, s := range specs[1:] { + p := s.Pos() + line := tokFile.Line(p) + for previousLine := line - 1; previousLine >= firstSpecLine; { + // MergeLine can panic. Avoid the panic at the cost of not removing the blank line + // golang/go#50329 + if previousLine > 0 && previousLine < tokFile.LineCount() { + tokFile.MergeLine(previousLine) // has side effects! + previousLine-- + } else { + // try to gather some data to diagnose how this could happen + req := "Please report what the imports section of your go file looked like." + log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. 
%s", + firstSpecLine, line, previousLine, tokFile.LineCount(), req) + } + } + } + return specs +} + +type byImportSpec struct { + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec +} + +func (x byImportSpec) Len() int { return len(x.specs) } +func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x.specs[i]) + jpath := importPath(x.specs[j]) + + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x.specs[i]) + jname := importName(x.specs[j]) + + if iname != jname { + return iname < jname + } + return importComment(x.specs[i]) < importComment(x.specs[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go new file mode 100644 index 00000000..fd689207 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -0,0 +1,17320 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. + +package stdlib + +var PackageSymbols = map[string][]Symbol{ + "archive/tar": { + {"(*Header).FileInfo", Method, 1}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteHeader", Method, 0}, + {"(Format).String", Method, 10}, + {"ErrFieldTooLong", Var, 0}, + {"ErrHeader", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"ErrWriteAfterClose", Var, 0}, + {"ErrWriteTooLong", Var, 0}, + {"FileInfoHeader", Func, 1}, + {"Format", Type, 10}, + {"FormatGNU", Const, 10}, + {"FormatPAX", Const, 10}, + {"FormatUSTAR", Const, 10}, + {"FormatUnknown", Const, 10}, + {"Header", Type, 0}, + {"Header.AccessTime", Field, 0}, + {"Header.ChangeTime", Field, 0}, + {"Header.Devmajor", Field, 0}, + {"Header.Devminor", Field, 0}, + {"Header.Format", Field, 10}, + {"Header.Gid", Field, 0}, + {"Header.Gname", Field, 0}, + {"Header.Linkname", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Mode", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.PAXRecords", Field, 10}, + {"Header.Size", Field, 0}, + {"Header.Typeflag", Field, 0}, + {"Header.Uid", Field, 0}, + {"Header.Uname", Field, 0}, + {"Header.Xattrs", Field, 3}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Reader", Type, 0}, + {"TypeBlock", Const, 0}, + {"TypeChar", Const, 0}, + {"TypeCont", Const, 0}, + {"TypeDir", Const, 0}, + {"TypeFifo", Const, 0}, + {"TypeGNULongLink", Const, 1}, + {"TypeGNULongName", Const, 1}, + {"TypeGNUSparse", Const, 3}, + {"TypeLink", Const, 0}, + {"TypeReg", Const, 0}, + {"TypeRegA", Const, 0}, + {"TypeSymlink", Const, 0}, + {"TypeXGlobalHeader", Const, 0}, + {"TypeXHeader", Const, 0}, + {"Writer", Type, 0}, + }, + "archive/zip": { + {"(*File).DataOffset", Method, 2}, + {"(*File).FileInfo", Method, 0}, + {"(*File).ModTime", Method, 0}, + {"(*File).Mode", Method, 0}, + {"(*File).Open", Method, 0}, + {"(*File).OpenRaw", 
Method, 17}, + {"(*File).SetModTime", Method, 0}, + {"(*File).SetMode", Method, 0}, + {"(*FileHeader).FileInfo", Method, 0}, + {"(*FileHeader).ModTime", Method, 0}, + {"(*FileHeader).Mode", Method, 0}, + {"(*FileHeader).SetModTime", Method, 0}, + {"(*FileHeader).SetMode", Method, 0}, + {"(*ReadCloser).Close", Method, 0}, + {"(*ReadCloser).Open", Method, 16}, + {"(*ReadCloser).RegisterDecompressor", Method, 6}, + {"(*Reader).Open", Method, 16}, + {"(*Reader).RegisterDecompressor", Method, 6}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Copy", Method, 17}, + {"(*Writer).Create", Method, 0}, + {"(*Writer).CreateHeader", Method, 0}, + {"(*Writer).CreateRaw", Method, 17}, + {"(*Writer).Flush", Method, 4}, + {"(*Writer).RegisterCompressor", Method, 6}, + {"(*Writer).SetComment", Method, 10}, + {"(*Writer).SetOffset", Method, 5}, + {"Compressor", Type, 2}, + {"Decompressor", Type, 2}, + {"Deflate", Const, 0}, + {"ErrAlgorithm", Var, 0}, + {"ErrChecksum", Var, 0}, + {"ErrFormat", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.CRC32", Field, 0}, + {"FileHeader.Comment", Field, 0}, + {"FileHeader.CompressedSize", Field, 0}, + {"FileHeader.CompressedSize64", Field, 1}, + {"FileHeader.CreatorVersion", Field, 0}, + {"FileHeader.ExternalAttrs", Field, 0}, + {"FileHeader.Extra", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Method", Field, 0}, + {"FileHeader.Modified", Field, 10}, + {"FileHeader.ModifiedDate", Field, 0}, + {"FileHeader.ModifiedTime", Field, 0}, + {"FileHeader.Name", Field, 0}, + {"FileHeader.NonUTF8", Field, 10}, + {"FileHeader.ReaderVersion", Field, 0}, + {"FileHeader.UncompressedSize", Field, 0}, + {"FileHeader.UncompressedSize64", Field, 1}, + {"FileInfoHeader", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"OpenReader", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadCloser.Reader", Field, 0}, + {"Reader", Type, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.File", Field, 0}, + {"RegisterCompressor", Func, 2}, + {"RegisterDecompressor", Func, 2}, + {"Store", Const, 0}, + {"Writer", Type, 0}, + }, + "bufio": { + {"(*Reader).Buffered", Method, 0}, + {"(*Reader).Discard", Method, 5}, + {"(*Reader).Peek", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadBytes", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).ReadSlice", Method, 0}, + {"(*Reader).ReadString", Method, 0}, + {"(*Reader).Reset", Method, 2}, + {"(*Reader).Size", Method, 10}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Scanner).Buffer", Method, 6}, + {"(*Scanner).Bytes", Method, 1}, + {"(*Scanner).Err", Method, 1}, + {"(*Scanner).Scan", Method, 1}, + {"(*Scanner).Split", Method, 1}, + {"(*Scanner).Text", Method, 1}, + {"(*Writer).Available", Method, 0}, + {"(*Writer).AvailableBuffer", Method, 18}, + {"(*Writer).Buffered", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).ReadFrom", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Size", Method, 10}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteByte", Method, 0}, + {"(*Writer).WriteRune", Method, 0}, + {"(*Writer).WriteString", Method, 0}, + {"(ReadWriter).Available", Method, 0}, + {"(ReadWriter).AvailableBuffer", Method, 18}, + {"(ReadWriter).Discard", Method, 5}, + {"(ReadWriter).Flush", Method, 0}, + 
{"(ReadWriter).Peek", Method, 0}, + {"(ReadWriter).Read", Method, 0}, + {"(ReadWriter).ReadByte", Method, 0}, + {"(ReadWriter).ReadBytes", Method, 0}, + {"(ReadWriter).ReadFrom", Method, 1}, + {"(ReadWriter).ReadLine", Method, 0}, + {"(ReadWriter).ReadRune", Method, 0}, + {"(ReadWriter).ReadSlice", Method, 0}, + {"(ReadWriter).ReadString", Method, 0}, + {"(ReadWriter).UnreadByte", Method, 0}, + {"(ReadWriter).UnreadRune", Method, 0}, + {"(ReadWriter).Write", Method, 0}, + {"(ReadWriter).WriteByte", Method, 0}, + {"(ReadWriter).WriteRune", Method, 0}, + {"(ReadWriter).WriteString", Method, 0}, + {"(ReadWriter).WriteTo", Method, 1}, + {"ErrAdvanceTooFar", Var, 1}, + {"ErrBadReadCount", Var, 15}, + {"ErrBufferFull", Var, 0}, + {"ErrFinalToken", Var, 6}, + {"ErrInvalidUnreadByte", Var, 0}, + {"ErrInvalidUnreadRune", Var, 0}, + {"ErrNegativeAdvance", Var, 1}, + {"ErrNegativeCount", Var, 0}, + {"ErrTooLong", Var, 1}, + {"MaxScanTokenSize", Const, 1}, + {"NewReadWriter", Func, 0}, + {"NewReader", Func, 0}, + {"NewReaderSize", Func, 0}, + {"NewScanner", Func, 1}, + {"NewWriter", Func, 0}, + {"NewWriterSize", Func, 0}, + {"ReadWriter", Type, 0}, + {"ReadWriter.Reader", Field, 0}, + {"ReadWriter.Writer", Field, 0}, + {"Reader", Type, 0}, + {"ScanBytes", Func, 1}, + {"ScanLines", Func, 1}, + {"ScanRunes", Func, 1}, + {"ScanWords", Func, 1}, + {"Scanner", Type, 1}, + {"SplitFunc", Type, 1}, + {"Writer", Type, 0}, + }, + "bytes": { + {"(*Buffer).Available", Method, 21}, + {"(*Buffer).AvailableBuffer", Method, 21}, + {"(*Buffer).Bytes", Method, 0}, + {"(*Buffer).Cap", Method, 5}, + {"(*Buffer).Grow", Method, 1}, + {"(*Buffer).Len", Method, 0}, + {"(*Buffer).Next", Method, 0}, + {"(*Buffer).Read", Method, 0}, + {"(*Buffer).ReadByte", Method, 0}, + {"(*Buffer).ReadBytes", Method, 0}, + {"(*Buffer).ReadFrom", Method, 0}, + {"(*Buffer).ReadRune", Method, 0}, + {"(*Buffer).ReadString", Method, 0}, + {"(*Buffer).Reset", Method, 0}, + {"(*Buffer).String", Method, 0}, + {"(*Buffer).Truncate", Method, 0}, + {"(*Buffer).UnreadByte", Method, 0}, + {"(*Buffer).UnreadRune", Method, 0}, + {"(*Buffer).Write", Method, 0}, + {"(*Buffer).WriteByte", Method, 0}, + {"(*Buffer).WriteRune", Method, 0}, + {"(*Buffer).WriteString", Method, 0}, + {"(*Buffer).WriteTo", Method, 0}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"Buffer", Type, 0}, + {"Clone", Func, 20}, + {"Compare", Func, 0}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 7}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 7}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"Equal", Func, 0}, + {"EqualFold", Func, 0}, + {"ErrTooLarge", Var, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 0}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"MinRead", Const, 0}, + {"NewBuffer", Func, 0}, + {"NewBufferString", Func, 0}, + {"NewReader", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", 
Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Runes", Func, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "cmp": { + {"Compare", Func, 21}, + {"Less", Func, 21}, + {"Or", Func, 22}, + {"Ordered", Type, 21}, + }, + "compress/bzip2": { + {"(StructuralError).Error", Method, 0}, + {"NewReader", Func, 0}, + {"StructuralError", Type, 0}, + }, + "compress/flate": { + {"(*ReadError).Error", Method, 0}, + {"(*WriteError).Error", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(InternalError).Error", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"CorruptInputError", Type, 0}, + {"DefaultCompression", Const, 0}, + {"HuffmanOnly", Const, 7}, + {"InternalError", Type, 0}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterDict", Func, 0}, + {"NoCompression", Const, 0}, + {"ReadError", Type, 0}, + {"ReadError.Err", Field, 0}, + {"ReadError.Offset", Field, 0}, + {"Reader", Type, 0}, + {"Resetter", Type, 4}, + {"WriteError", Type, 0}, + {"WriteError.Err", Field, 0}, + {"WriteError.Offset", Field, 0}, + {"Writer", Type, 0}, + }, + "compress/gzip": { + {"(*Reader).Close", Method, 0}, + {"(*Reader).Multistream", Method, 4}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).Reset", Method, 3}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrHeader", Var, 0}, + {"Header", Type, 0}, + {"Header.Comment", Field, 0}, + {"Header.Extra", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.OS", Field, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NoCompression", Const, 0}, + {"Reader", Type, 0}, + {"Reader.Header", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Header", Field, 0}, + }, + "compress/lzw": { + {"(*Reader).Close", Method, 17}, + {"(*Reader).Read", Method, 17}, + {"(*Reader).Reset", Method, 17}, + {"(*Writer).Close", Method, 17}, + {"(*Writer).Reset", Method, 17}, + {"(*Writer).Write", Method, 17}, + {"LSB", Const, 0}, + {"MSB", Const, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Order", Type, 0}, + {"Reader", Type, 17}, + {"Writer", Type, 17}, + }, + "compress/zlib": { + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrDictionary", Var, 0}, + {"ErrHeader", Var, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + 
{"NewWriterLevelDict", Func, 0}, + {"NoCompression", Const, 0}, + {"Resetter", Type, 4}, + {"Writer", Type, 0}, + }, + "container/heap": { + {"Fix", Func, 2}, + {"Init", Func, 0}, + {"Interface", Type, 0}, + {"Pop", Func, 0}, + {"Push", Func, 0}, + {"Remove", Func, 0}, + }, + "container/list": { + {"(*Element).Next", Method, 0}, + {"(*Element).Prev", Method, 0}, + {"(*List).Back", Method, 0}, + {"(*List).Front", Method, 0}, + {"(*List).Init", Method, 0}, + {"(*List).InsertAfter", Method, 0}, + {"(*List).InsertBefore", Method, 0}, + {"(*List).Len", Method, 0}, + {"(*List).MoveAfter", Method, 2}, + {"(*List).MoveBefore", Method, 2}, + {"(*List).MoveToBack", Method, 0}, + {"(*List).MoveToFront", Method, 0}, + {"(*List).PushBack", Method, 0}, + {"(*List).PushBackList", Method, 0}, + {"(*List).PushFront", Method, 0}, + {"(*List).PushFrontList", Method, 0}, + {"(*List).Remove", Method, 0}, + {"Element", Type, 0}, + {"Element.Value", Field, 0}, + {"List", Type, 0}, + {"New", Func, 0}, + }, + "container/ring": { + {"(*Ring).Do", Method, 0}, + {"(*Ring).Len", Method, 0}, + {"(*Ring).Link", Method, 0}, + {"(*Ring).Move", Method, 0}, + {"(*Ring).Next", Method, 0}, + {"(*Ring).Prev", Method, 0}, + {"(*Ring).Unlink", Method, 0}, + {"New", Func, 0}, + {"Ring", Type, 0}, + {"Ring.Value", Field, 0}, + }, + "context": { + {"AfterFunc", Func, 21}, + {"Background", Func, 7}, + {"CancelCauseFunc", Type, 20}, + {"CancelFunc", Type, 7}, + {"Canceled", Var, 7}, + {"Cause", Func, 20}, + {"Context", Type, 7}, + {"DeadlineExceeded", Var, 7}, + {"TODO", Func, 7}, + {"WithCancel", Func, 7}, + {"WithCancelCause", Func, 20}, + {"WithDeadline", Func, 7}, + {"WithDeadlineCause", Func, 21}, + {"WithTimeout", Func, 7}, + {"WithTimeoutCause", Func, 21}, + {"WithValue", Func, 7}, + {"WithoutCancel", Func, 21}, + }, + "crypto": { + {"(Hash).Available", Method, 0}, + {"(Hash).HashFunc", Method, 4}, + {"(Hash).New", Method, 0}, + {"(Hash).Size", Method, 0}, + {"(Hash).String", Method, 15}, + {"BLAKE2b_256", Const, 9}, + {"BLAKE2b_384", Const, 9}, + {"BLAKE2b_512", Const, 9}, + {"BLAKE2s_256", Const, 9}, + {"Decrypter", Type, 5}, + {"DecrypterOpts", Type, 5}, + {"Hash", Type, 0}, + {"MD4", Const, 0}, + {"MD5", Const, 0}, + {"MD5SHA1", Const, 0}, + {"PrivateKey", Type, 0}, + {"PublicKey", Type, 2}, + {"RIPEMD160", Const, 0}, + {"RegisterHash", Func, 0}, + {"SHA1", Const, 0}, + {"SHA224", Const, 0}, + {"SHA256", Const, 0}, + {"SHA384", Const, 0}, + {"SHA3_224", Const, 4}, + {"SHA3_256", Const, 4}, + {"SHA3_384", Const, 4}, + {"SHA3_512", Const, 4}, + {"SHA512", Const, 0}, + {"SHA512_224", Const, 5}, + {"SHA512_256", Const, 5}, + {"Signer", Type, 4}, + {"SignerOpts", Type, 4}, + }, + "crypto/aes": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/cipher": { + {"(StreamReader).Read", Method, 0}, + {"(StreamWriter).Close", Method, 0}, + {"(StreamWriter).Write", Method, 0}, + {"AEAD", Type, 2}, + {"Block", Type, 0}, + {"BlockMode", Type, 0}, + {"NewCBCDecrypter", Func, 0}, + {"NewCBCEncrypter", Func, 0}, + {"NewCFBDecrypter", Func, 0}, + {"NewCFBEncrypter", Func, 0}, + {"NewCTR", Func, 0}, + {"NewGCM", Func, 2}, + {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithTagSize", Func, 11}, + {"NewOFB", Func, 0}, + {"Stream", Type, 0}, + {"StreamReader", Type, 0}, + {"StreamReader.R", Field, 0}, + {"StreamReader.S", Field, 0}, + {"StreamWriter", Type, 0}, + {"StreamWriter.Err", Field, 0}, + {"StreamWriter.S", Field, 0}, + {"StreamWriter.W", Field, 0}, + }, 
+ "crypto/des": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + {"NewTripleDESCipher", Func, 0}, + }, + "crypto/dsa": { + {"ErrInvalidPublicKey", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateParameters", Func, 0}, + {"L1024N160", Const, 0}, + {"L2048N224", Const, 0}, + {"L2048N256", Const, 0}, + {"L3072N256", Const, 0}, + {"ParameterSizes", Type, 0}, + {"Parameters", Type, 0}, + {"Parameters.G", Field, 0}, + {"Parameters.P", Field, 0}, + {"Parameters.Q", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PrivateKey.X", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Parameters", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"Verify", Func, 0}, + }, + "crypto/ecdh": { + {"(*PrivateKey).Bytes", Method, 20}, + {"(*PrivateKey).Curve", Method, 20}, + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 20}, + {"(*PrivateKey).Public", Method, 20}, + {"(*PrivateKey).PublicKey", Method, 20}, + {"(*PublicKey).Bytes", Method, 20}, + {"(*PublicKey).Curve", Method, 20}, + {"(*PublicKey).Equal", Method, 20}, + {"Curve", Type, 20}, + {"P256", Func, 20}, + {"P384", Func, 20}, + {"P521", Func, 20}, + {"PrivateKey", Type, 20}, + {"PublicKey", Type, 20}, + {"X25519", Func, 20}, + }, + "crypto/ecdsa": { + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PublicKey).ECDH", Method, 20}, + {"(*PublicKey).Equal", Method, 15}, + {"(PrivateKey).Add", Method, 0}, + {"(PrivateKey).Double", Method, 0}, + {"(PrivateKey).IsOnCurve", Method, 0}, + {"(PrivateKey).Params", Method, 0}, + {"(PrivateKey).ScalarBaseMult", Method, 0}, + {"(PrivateKey).ScalarMult", Method, 0}, + {"(PublicKey).Add", Method, 0}, + {"(PublicKey).Double", Method, 0}, + {"(PublicKey).IsOnCurve", Method, 0}, + {"(PublicKey).Params", Method, 0}, + {"(PublicKey).ScalarBaseMult", Method, 0}, + {"(PublicKey).ScalarMult", Method, 0}, + {"GenerateKey", Func, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Curve", Field, 0}, + {"PublicKey.X", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"SignASN1", Func, 15}, + {"Verify", Func, 0}, + {"VerifyASN1", Func, 15}, + }, + "crypto/ed25519": { + {"(*Options).HashFunc", Method, 20}, + {"(PrivateKey).Equal", Method, 15}, + {"(PrivateKey).Public", Method, 13}, + {"(PrivateKey).Seed", Method, 13}, + {"(PrivateKey).Sign", Method, 13}, + {"(PublicKey).Equal", Method, 15}, + {"GenerateKey", Func, 13}, + {"NewKeyFromSeed", Func, 13}, + {"Options", Type, 20}, + {"Options.Context", Field, 20}, + {"Options.Hash", Field, 20}, + {"PrivateKey", Type, 13}, + {"PrivateKeySize", Const, 13}, + {"PublicKey", Type, 13}, + {"PublicKeySize", Const, 13}, + {"SeedSize", Const, 13}, + {"Sign", Func, 13}, + {"SignatureSize", Const, 13}, + {"Verify", Func, 13}, + {"VerifyWithOptions", Func, 20}, + }, + "crypto/elliptic": { + {"(*CurveParams).Add", Method, 0}, + {"(*CurveParams).Double", Method, 0}, + {"(*CurveParams).IsOnCurve", Method, 0}, + {"(*CurveParams).Params", Method, 0}, + {"(*CurveParams).ScalarBaseMult", Method, 0}, + {"(*CurveParams).ScalarMult", Method, 0}, + {"Curve", Type, 0}, + {"CurveParams", Type, 0}, + {"CurveParams.B", Field, 0}, + {"CurveParams.BitSize", Field, 0}, + {"CurveParams.Gx", Field, 0}, + {"CurveParams.Gy", Field, 0}, + {"CurveParams.N", Field, 0}, + 
{"CurveParams.Name", Field, 5}, + {"CurveParams.P", Field, 0}, + {"GenerateKey", Func, 0}, + {"Marshal", Func, 0}, + {"MarshalCompressed", Func, 15}, + {"P224", Func, 0}, + {"P256", Func, 0}, + {"P384", Func, 0}, + {"P521", Func, 0}, + {"Unmarshal", Func, 0}, + {"UnmarshalCompressed", Func, 15}, + }, + "crypto/hmac": { + {"Equal", Func, 1}, + {"New", Func, 0}, + }, + "crypto/md5": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/rand": { + {"Int", Func, 0}, + {"Prime", Func, 0}, + {"Read", Func, 0}, + {"Reader", Var, 0}, + }, + "crypto/rc4": { + {"(*Cipher).Reset", Method, 0}, + {"(*Cipher).XORKeyStream", Method, 0}, + {"(KeySizeError).Error", Method, 0}, + {"Cipher", Type, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/rsa": { + {"(*PSSOptions).HashFunc", Method, 4}, + {"(*PrivateKey).Decrypt", Method, 5}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Precompute", Method, 0}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PrivateKey).Size", Method, 11}, + {"(*PrivateKey).Validate", Method, 0}, + {"(*PublicKey).Equal", Method, 15}, + {"(*PublicKey).Size", Method, 11}, + {"CRTValue", Type, 0}, + {"CRTValue.Coeff", Field, 0}, + {"CRTValue.Exp", Field, 0}, + {"CRTValue.R", Field, 0}, + {"DecryptOAEP", Func, 0}, + {"DecryptPKCS1v15", Func, 0}, + {"DecryptPKCS1v15SessionKey", Func, 0}, + {"EncryptOAEP", Func, 0}, + {"EncryptPKCS1v15", Func, 0}, + {"ErrDecryption", Var, 0}, + {"ErrMessageTooLong", Var, 0}, + {"ErrVerification", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateMultiPrimeKey", Func, 0}, + {"OAEPOptions", Type, 5}, + {"OAEPOptions.Hash", Field, 5}, + {"OAEPOptions.Label", Field, 5}, + {"OAEPOptions.MGFHash", Field, 20}, + {"PKCS1v15DecryptOptions", Type, 5}, + {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5}, + {"PSSOptions", Type, 2}, + {"PSSOptions.Hash", Field, 4}, + {"PSSOptions.SaltLength", Field, 2}, + {"PSSSaltLengthAuto", Const, 2}, + {"PSSSaltLengthEqualsHash", Const, 2}, + {"PrecomputedValues", Type, 0}, + {"PrecomputedValues.CRTValues", Field, 0}, + {"PrecomputedValues.Dp", Field, 0}, + {"PrecomputedValues.Dq", Field, 0}, + {"PrecomputedValues.Qinv", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.Precomputed", Field, 0}, + {"PrivateKey.Primes", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.E", Field, 0}, + {"PublicKey.N", Field, 0}, + {"SignPKCS1v15", Func, 0}, + {"SignPSS", Func, 2}, + {"VerifyPKCS1v15", Func, 0}, + {"VerifyPSS", Func, 2}, + }, + "crypto/sha1": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/sha256": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New224", Func, 0}, + {"Size", Const, 0}, + {"Size224", Const, 0}, + {"Sum224", Func, 2}, + {"Sum256", Func, 2}, + }, + "crypto/sha512": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New384", Func, 0}, + {"New512_224", Func, 5}, + {"New512_256", Func, 5}, + {"Size", Const, 0}, + {"Size224", Const, 5}, + {"Size256", Const, 5}, + {"Size384", Const, 0}, + {"Sum384", Func, 2}, + {"Sum512", Func, 2}, + {"Sum512_224", Func, 5}, + {"Sum512_256", Func, 5}, + }, + "crypto/subtle": { + {"ConstantTimeByteEq", Func, 0}, + {"ConstantTimeCompare", Func, 0}, + {"ConstantTimeCopy", Func, 0}, + {"ConstantTimeEq", Func, 0}, + {"ConstantTimeLessOrEq", Func, 2}, + {"ConstantTimeSelect", Func, 0}, + {"XORBytes", Func, 20}, + }, + "crypto/tls": { + 
{"(*CertificateRequestInfo).Context", Method, 17}, + {"(*CertificateRequestInfo).SupportsCertificate", Method, 14}, + {"(*CertificateVerificationError).Error", Method, 20}, + {"(*CertificateVerificationError).Unwrap", Method, 20}, + {"(*ClientHelloInfo).Context", Method, 17}, + {"(*ClientHelloInfo).SupportsCertificate", Method, 14}, + {"(*ClientSessionState).ResumptionState", Method, 21}, + {"(*Config).BuildNameToCertificate", Method, 0}, + {"(*Config).Clone", Method, 8}, + {"(*Config).DecryptTicket", Method, 21}, + {"(*Config).EncryptTicket", Method, 21}, + {"(*Config).SetSessionTicketKeys", Method, 5}, + {"(*Conn).Close", Method, 0}, + {"(*Conn).CloseWrite", Method, 8}, + {"(*Conn).ConnectionState", Method, 0}, + {"(*Conn).Handshake", Method, 0}, + {"(*Conn).HandshakeContext", Method, 17}, + {"(*Conn).LocalAddr", Method, 0}, + {"(*Conn).NetConn", Method, 18}, + {"(*Conn).OCSPResponse", Method, 0}, + {"(*Conn).Read", Method, 0}, + {"(*Conn).RemoteAddr", Method, 0}, + {"(*Conn).SetDeadline", Method, 0}, + {"(*Conn).SetReadDeadline", Method, 0}, + {"(*Conn).SetWriteDeadline", Method, 0}, + {"(*Conn).VerifyHostname", Method, 0}, + {"(*Conn).Write", Method, 0}, + {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, + {"(*Dialer).Dial", Method, 15}, + {"(*Dialer).DialContext", Method, 15}, + {"(*QUICConn).Close", Method, 21}, + {"(*QUICConn).ConnectionState", Method, 21}, + {"(*QUICConn).HandleData", Method, 21}, + {"(*QUICConn).NextEvent", Method, 21}, + {"(*QUICConn).SendSessionTicket", Method, 21}, + {"(*QUICConn).SetTransportParameters", Method, 21}, + {"(*QUICConn).Start", Method, 21}, + {"(*SessionState).Bytes", Method, 21}, + {"(AlertError).Error", Method, 21}, + {"(ClientAuthType).String", Method, 15}, + {"(CurveID).String", Method, 15}, + {"(QUICEncryptionLevel).String", Method, 21}, + {"(RecordHeaderError).Error", Method, 6}, + {"(SignatureScheme).String", Method, 15}, + {"AlertError", Type, 21}, + {"Certificate", Type, 0}, + {"Certificate.Certificate", Field, 0}, + {"Certificate.Leaf", Field, 0}, + {"Certificate.OCSPStaple", Field, 0}, + {"Certificate.PrivateKey", Field, 0}, + {"Certificate.SignedCertificateTimestamps", Field, 5}, + {"Certificate.SupportedSignatureAlgorithms", Field, 14}, + {"CertificateRequestInfo", Type, 8}, + {"CertificateRequestInfo.AcceptableCAs", Field, 8}, + {"CertificateRequestInfo.SignatureSchemes", Field, 8}, + {"CertificateRequestInfo.Version", Field, 14}, + {"CertificateVerificationError", Type, 20}, + {"CertificateVerificationError.Err", Field, 20}, + {"CertificateVerificationError.UnverifiedCertificates", Field, 20}, + {"CipherSuite", Type, 14}, + {"CipherSuite.ID", Field, 14}, + {"CipherSuite.Insecure", Field, 14}, + {"CipherSuite.Name", Field, 14}, + {"CipherSuite.SupportedVersions", Field, 14}, + {"CipherSuiteName", Func, 14}, + {"CipherSuites", Func, 14}, + {"Client", Func, 0}, + {"ClientAuthType", Type, 0}, + {"ClientHelloInfo", Type, 4}, + {"ClientHelloInfo.CipherSuites", Field, 4}, + {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.ServerName", Field, 4}, + {"ClientHelloInfo.SignatureSchemes", Field, 8}, + {"ClientHelloInfo.SupportedCurves", Field, 4}, + {"ClientHelloInfo.SupportedPoints", Field, 4}, + {"ClientHelloInfo.SupportedProtos", Field, 8}, + {"ClientHelloInfo.SupportedVersions", Field, 8}, + {"ClientSessionCache", Type, 3}, + {"ClientSessionState", Type, 3}, + {"Config", Type, 0}, + {"Config.Certificates", Field, 0}, + {"Config.CipherSuites", Field, 0}, + {"Config.ClientAuth", Field, 0}, + {"Config.ClientCAs", Field, 0}, + 
{"Config.ClientSessionCache", Field, 3}, + {"Config.CurvePreferences", Field, 3}, + {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.GetCertificate", Field, 4}, + {"Config.GetClientCertificate", Field, 8}, + {"Config.GetConfigForClient", Field, 8}, + {"Config.InsecureSkipVerify", Field, 0}, + {"Config.KeyLogWriter", Field, 8}, + {"Config.MaxVersion", Field, 2}, + {"Config.MinVersion", Field, 2}, + {"Config.NameToCertificate", Field, 0}, + {"Config.NextProtos", Field, 0}, + {"Config.PreferServerCipherSuites", Field, 1}, + {"Config.Rand", Field, 0}, + {"Config.Renegotiation", Field, 7}, + {"Config.RootCAs", Field, 0}, + {"Config.ServerName", Field, 0}, + {"Config.SessionTicketKey", Field, 1}, + {"Config.SessionTicketsDisabled", Field, 1}, + {"Config.Time", Field, 0}, + {"Config.UnwrapSession", Field, 21}, + {"Config.VerifyConnection", Field, 15}, + {"Config.VerifyPeerCertificate", Field, 8}, + {"Config.WrapSession", Field, 21}, + {"Conn", Type, 0}, + {"ConnectionState", Type, 0}, + {"ConnectionState.CipherSuite", Field, 0}, + {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.HandshakeComplete", Field, 0}, + {"ConnectionState.NegotiatedProtocol", Field, 0}, + {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, + {"ConnectionState.OCSPResponse", Field, 5}, + {"ConnectionState.PeerCertificates", Field, 0}, + {"ConnectionState.ServerName", Field, 0}, + {"ConnectionState.SignedCertificateTimestamps", Field, 5}, + {"ConnectionState.TLSUnique", Field, 4}, + {"ConnectionState.VerifiedChains", Field, 0}, + {"ConnectionState.Version", Field, 3}, + {"CurveID", Type, 3}, + {"CurveP256", Const, 3}, + {"CurveP384", Const, 3}, + {"CurveP521", Const, 3}, + {"Dial", Func, 0}, + {"DialWithDialer", Func, 3}, + {"Dialer", Type, 15}, + {"Dialer.Config", Field, 15}, + {"Dialer.NetDialer", Field, 15}, + {"ECDSAWithP256AndSHA256", Const, 8}, + {"ECDSAWithP384AndSHA384", Const, 8}, + {"ECDSAWithP521AndSHA512", Const, 8}, + {"ECDSAWithSHA1", Const, 10}, + {"Ed25519", Const, 13}, + {"InsecureCipherSuites", Func, 14}, + {"Listen", Func, 0}, + {"LoadX509KeyPair", Func, 0}, + {"NewLRUClientSessionCache", Func, 3}, + {"NewListener", Func, 0}, + {"NewResumptionState", Func, 21}, + {"NoClientCert", Const, 0}, + {"PKCS1WithSHA1", Const, 8}, + {"PKCS1WithSHA256", Const, 8}, + {"PKCS1WithSHA384", Const, 8}, + {"PKCS1WithSHA512", Const, 8}, + {"PSSWithSHA256", Const, 8}, + {"PSSWithSHA384", Const, 8}, + {"PSSWithSHA512", Const, 8}, + {"ParseSessionState", Func, 21}, + {"QUICClient", Func, 21}, + {"QUICConfig", Type, 21}, + {"QUICConfig.TLSConfig", Field, 21}, + {"QUICConn", Type, 21}, + {"QUICEncryptionLevel", Type, 21}, + {"QUICEncryptionLevelApplication", Const, 21}, + {"QUICEncryptionLevelEarly", Const, 21}, + {"QUICEncryptionLevelHandshake", Const, 21}, + {"QUICEncryptionLevelInitial", Const, 21}, + {"QUICEvent", Type, 21}, + {"QUICEvent.Data", Field, 21}, + {"QUICEvent.Kind", Field, 21}, + {"QUICEvent.Level", Field, 21}, + {"QUICEvent.Suite", Field, 21}, + {"QUICEventKind", Type, 21}, + {"QUICHandshakeDone", Const, 21}, + {"QUICNoEvent", Const, 21}, + {"QUICRejectedEarlyData", Const, 21}, + {"QUICServer", Func, 21}, + {"QUICSessionTicketOptions", Type, 21}, + {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSetReadSecret", Const, 21}, + {"QUICSetWriteSecret", Const, 21}, + {"QUICTransportParameters", Const, 21}, + {"QUICTransportParametersRequired", Const, 21}, + {"QUICWriteData", Const, 21}, + {"RecordHeaderError", Type, 6}, + {"RecordHeaderError.Conn", Field, 12}, + 
{"RecordHeaderError.Msg", Field, 6}, + {"RecordHeaderError.RecordHeader", Field, 6}, + {"RenegotiateFreelyAsClient", Const, 7}, + {"RenegotiateNever", Const, 7}, + {"RenegotiateOnceAsClient", Const, 7}, + {"RenegotiationSupport", Type, 7}, + {"RequestClientCert", Const, 0}, + {"RequireAndVerifyClientCert", Const, 0}, + {"RequireAnyClientCert", Const, 0}, + {"Server", Func, 0}, + {"SessionState", Type, 21}, + {"SessionState.EarlyData", Field, 21}, + {"SessionState.Extra", Field, 21}, + {"SignatureScheme", Type, 8}, + {"TLS_AES_128_GCM_SHA256", Const, 12}, + {"TLS_AES_256_GCM_SHA384", Const, 12}, + {"TLS_CHACHA20_POLY1305_SHA256", Const, 12}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2}, + {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0}, + {"TLS_FALLBACK_SCSV", Const, 4}, + {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6}, + {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6}, + {"TLS_RSA_WITH_RC4_128_SHA", Const, 0}, + {"VerifyClientCertIfGiven", Const, 0}, + {"VersionName", Func, 21}, + {"VersionSSL30", Const, 2}, + {"VersionTLS10", Const, 2}, + {"VersionTLS11", Const, 2}, + {"VersionTLS12", Const, 2}, + {"VersionTLS13", Const, 12}, + {"X25519", Const, 8}, + {"X509KeyPair", Func, 0}, + }, + "crypto/x509": { + {"(*CertPool).AddCert", Method, 0}, + {"(*CertPool).AddCertWithConstraint", Method, 22}, + {"(*CertPool).AppendCertsFromPEM", Method, 0}, + {"(*CertPool).Clone", Method, 19}, + {"(*CertPool).Equal", Method, 19}, + {"(*CertPool).Subjects", Method, 0}, + {"(*Certificate).CheckCRLSignature", Method, 0}, + {"(*Certificate).CheckSignature", Method, 0}, + {"(*Certificate).CheckSignatureFrom", Method, 0}, + {"(*Certificate).CreateCRL", Method, 0}, + {"(*Certificate).Equal", Method, 0}, + {"(*Certificate).Verify", Method, 0}, + {"(*Certificate).VerifyHostname", Method, 0}, + {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*RevocationList).CheckSignatureFrom", Method, 19}, + {"(CertificateInvalidError).Error", Method, 0}, + {"(ConstraintViolationError).Error", Method, 0}, + {"(HostnameError).Error", Method, 0}, + {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).Equal", Method, 22}, + {"(OID).EqualASN1OID", Method, 22}, + {"(OID).String", Method, 22}, + {"(PublicKeyAlgorithm).String", Method, 10}, + {"(SignatureAlgorithm).String", Method, 6}, + {"(SystemRootsError).Error", Method, 1}, + {"(SystemRootsError).Unwrap", Method, 16}, + {"(UnhandledCriticalExtension).Error", Method, 0}, + {"(UnknownAuthorityError).Error", Method, 0}, + {"CANotAuthorizedForExtKeyUsage", Const, 10}, + 
{"CANotAuthorizedForThisName", Const, 0}, + {"CertPool", Type, 0}, + {"Certificate", Type, 0}, + {"Certificate.AuthorityKeyId", Field, 0}, + {"Certificate.BasicConstraintsValid", Field, 0}, + {"Certificate.CRLDistributionPoints", Field, 2}, + {"Certificate.DNSNames", Field, 0}, + {"Certificate.EmailAddresses", Field, 0}, + {"Certificate.ExcludedDNSDomains", Field, 9}, + {"Certificate.ExcludedEmailAddresses", Field, 10}, + {"Certificate.ExcludedIPRanges", Field, 10}, + {"Certificate.ExcludedURIDomains", Field, 10}, + {"Certificate.ExtKeyUsage", Field, 0}, + {"Certificate.Extensions", Field, 2}, + {"Certificate.ExtraExtensions", Field, 2}, + {"Certificate.IPAddresses", Field, 1}, + {"Certificate.IsCA", Field, 0}, + {"Certificate.Issuer", Field, 0}, + {"Certificate.IssuingCertificateURL", Field, 2}, + {"Certificate.KeyUsage", Field, 0}, + {"Certificate.MaxPathLen", Field, 0}, + {"Certificate.MaxPathLenZero", Field, 4}, + {"Certificate.NotAfter", Field, 0}, + {"Certificate.NotBefore", Field, 0}, + {"Certificate.OCSPServer", Field, 2}, + {"Certificate.PermittedDNSDomains", Field, 0}, + {"Certificate.PermittedDNSDomainsCritical", Field, 0}, + {"Certificate.PermittedEmailAddresses", Field, 10}, + {"Certificate.PermittedIPRanges", Field, 10}, + {"Certificate.PermittedURIDomains", Field, 10}, + {"Certificate.Policies", Field, 22}, + {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PublicKey", Field, 0}, + {"Certificate.PublicKeyAlgorithm", Field, 0}, + {"Certificate.Raw", Field, 0}, + {"Certificate.RawIssuer", Field, 0}, + {"Certificate.RawSubject", Field, 0}, + {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, + {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.SerialNumber", Field, 0}, + {"Certificate.Signature", Field, 0}, + {"Certificate.SignatureAlgorithm", Field, 0}, + {"Certificate.Subject", Field, 0}, + {"Certificate.SubjectKeyId", Field, 0}, + {"Certificate.URIs", Field, 10}, + {"Certificate.UnhandledCriticalExtensions", Field, 5}, + {"Certificate.UnknownExtKeyUsage", Field, 0}, + {"Certificate.Version", Field, 0}, + {"CertificateInvalidError", Type, 0}, + {"CertificateInvalidError.Cert", Field, 0}, + {"CertificateInvalidError.Detail", Field, 10}, + {"CertificateInvalidError.Reason", Field, 0}, + {"CertificateRequest", Type, 3}, + {"CertificateRequest.Attributes", Field, 3}, + {"CertificateRequest.DNSNames", Field, 3}, + {"CertificateRequest.EmailAddresses", Field, 3}, + {"CertificateRequest.Extensions", Field, 3}, + {"CertificateRequest.ExtraExtensions", Field, 3}, + {"CertificateRequest.IPAddresses", Field, 3}, + {"CertificateRequest.PublicKey", Field, 3}, + {"CertificateRequest.PublicKeyAlgorithm", Field, 3}, + {"CertificateRequest.Raw", Field, 3}, + {"CertificateRequest.RawSubject", Field, 3}, + {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3}, + {"CertificateRequest.RawTBSCertificateRequest", Field, 3}, + {"CertificateRequest.Signature", Field, 3}, + {"CertificateRequest.SignatureAlgorithm", Field, 3}, + {"CertificateRequest.Subject", Field, 3}, + {"CertificateRequest.URIs", Field, 10}, + {"CertificateRequest.Version", Field, 3}, + {"ConstraintViolationError", Type, 0}, + {"CreateCertificate", Func, 0}, + {"CreateCertificateRequest", Func, 3}, + {"CreateRevocationList", Func, 15}, + {"DSA", Const, 0}, + {"DSAWithSHA1", Const, 0}, + {"DSAWithSHA256", Const, 0}, + {"DecryptPEMBlock", Func, 1}, + {"ECDSA", Const, 1}, + {"ECDSAWithSHA1", Const, 1}, + {"ECDSAWithSHA256", Const, 1}, + {"ECDSAWithSHA384", Const, 1}, + {"ECDSAWithSHA512", Const, 1}, + 
{"Ed25519", Const, 13}, + {"EncryptPEMBlock", Func, 1}, + {"ErrUnsupportedAlgorithm", Var, 0}, + {"Expired", Const, 0}, + {"ExtKeyUsage", Type, 0}, + {"ExtKeyUsageAny", Const, 0}, + {"ExtKeyUsageClientAuth", Const, 0}, + {"ExtKeyUsageCodeSigning", Const, 0}, + {"ExtKeyUsageEmailProtection", Const, 0}, + {"ExtKeyUsageIPSECEndSystem", Const, 1}, + {"ExtKeyUsageIPSECTunnel", Const, 1}, + {"ExtKeyUsageIPSECUser", Const, 1}, + {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1}, + {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1}, + {"ExtKeyUsageOCSPSigning", Const, 0}, + {"ExtKeyUsageServerAuth", Const, 0}, + {"ExtKeyUsageTimeStamping", Const, 0}, + {"HostnameError", Type, 0}, + {"HostnameError.Certificate", Field, 0}, + {"HostnameError.Host", Field, 0}, + {"IncompatibleUsage", Const, 1}, + {"IncorrectPasswordError", Var, 1}, + {"InsecureAlgorithmError", Type, 6}, + {"InvalidReason", Type, 0}, + {"IsEncryptedPEMBlock", Func, 1}, + {"KeyUsage", Type, 0}, + {"KeyUsageCRLSign", Const, 0}, + {"KeyUsageCertSign", Const, 0}, + {"KeyUsageContentCommitment", Const, 0}, + {"KeyUsageDataEncipherment", Const, 0}, + {"KeyUsageDecipherOnly", Const, 0}, + {"KeyUsageDigitalSignature", Const, 0}, + {"KeyUsageEncipherOnly", Const, 0}, + {"KeyUsageKeyAgreement", Const, 0}, + {"KeyUsageKeyEncipherment", Const, 0}, + {"MD2WithRSA", Const, 0}, + {"MD5WithRSA", Const, 0}, + {"MarshalECPrivateKey", Func, 2}, + {"MarshalPKCS1PrivateKey", Func, 0}, + {"MarshalPKCS1PublicKey", Func, 10}, + {"MarshalPKCS8PrivateKey", Func, 10}, + {"MarshalPKIXPublicKey", Func, 0}, + {"NameConstraintsWithoutSANs", Const, 10}, + {"NameMismatch", Const, 8}, + {"NewCertPool", Func, 0}, + {"NotAuthorizedToSign", Const, 0}, + {"OID", Type, 22}, + {"OIDFromInts", Func, 22}, + {"PEMCipher", Type, 1}, + {"PEMCipher3DES", Const, 1}, + {"PEMCipherAES128", Const, 1}, + {"PEMCipherAES192", Const, 1}, + {"PEMCipherAES256", Const, 1}, + {"PEMCipherDES", Const, 1}, + {"ParseCRL", Func, 0}, + {"ParseCertificate", Func, 0}, + {"ParseCertificateRequest", Func, 3}, + {"ParseCertificates", Func, 0}, + {"ParseDERCRL", Func, 0}, + {"ParseECPrivateKey", Func, 1}, + {"ParsePKCS1PrivateKey", Func, 0}, + {"ParsePKCS1PublicKey", Func, 10}, + {"ParsePKCS8PrivateKey", Func, 0}, + {"ParsePKIXPublicKey", Func, 0}, + {"ParseRevocationList", Func, 19}, + {"PublicKeyAlgorithm", Type, 0}, + {"PureEd25519", Const, 13}, + {"RSA", Const, 0}, + {"RevocationList", Type, 15}, + {"RevocationList.AuthorityKeyId", Field, 19}, + {"RevocationList.Extensions", Field, 19}, + {"RevocationList.ExtraExtensions", Field, 15}, + {"RevocationList.Issuer", Field, 19}, + {"RevocationList.NextUpdate", Field, 15}, + {"RevocationList.Number", Field, 15}, + {"RevocationList.Raw", Field, 19}, + {"RevocationList.RawIssuer", Field, 19}, + {"RevocationList.RawTBSRevocationList", Field, 19}, + {"RevocationList.RevokedCertificateEntries", Field, 21}, + {"RevocationList.RevokedCertificates", Field, 15}, + {"RevocationList.Signature", Field, 19}, + {"RevocationList.SignatureAlgorithm", Field, 15}, + {"RevocationList.ThisUpdate", Field, 15}, + {"RevocationListEntry", Type, 21}, + {"RevocationListEntry.Extensions", Field, 21}, + {"RevocationListEntry.ExtraExtensions", Field, 21}, + {"RevocationListEntry.Raw", Field, 21}, + {"RevocationListEntry.ReasonCode", Field, 21}, + {"RevocationListEntry.RevocationTime", Field, 21}, + {"RevocationListEntry.SerialNumber", Field, 21}, + {"SHA1WithRSA", 
Const, 0}, + {"SHA256WithRSA", Const, 0}, + {"SHA256WithRSAPSS", Const, 8}, + {"SHA384WithRSA", Const, 0}, + {"SHA384WithRSAPSS", Const, 8}, + {"SHA512WithRSA", Const, 0}, + {"SHA512WithRSAPSS", Const, 8}, + {"SetFallbackRoots", Func, 20}, + {"SignatureAlgorithm", Type, 0}, + {"SystemCertPool", Func, 7}, + {"SystemRootsError", Type, 1}, + {"SystemRootsError.Err", Field, 7}, + {"TooManyConstraints", Const, 10}, + {"TooManyIntermediates", Const, 0}, + {"UnconstrainedName", Const, 10}, + {"UnhandledCriticalExtension", Type, 0}, + {"UnknownAuthorityError", Type, 0}, + {"UnknownAuthorityError.Cert", Field, 8}, + {"UnknownPublicKeyAlgorithm", Const, 0}, + {"UnknownSignatureAlgorithm", Const, 0}, + {"VerifyOptions", Type, 0}, + {"VerifyOptions.CurrentTime", Field, 0}, + {"VerifyOptions.DNSName", Field, 0}, + {"VerifyOptions.Intermediates", Field, 0}, + {"VerifyOptions.KeyUsages", Field, 1}, + {"VerifyOptions.MaxConstraintComparisions", Field, 10}, + {"VerifyOptions.Roots", Field, 0}, + }, + "crypto/x509/pkix": { + {"(*CertificateList).HasExpired", Method, 0}, + {"(*Name).FillFromRDNSequence", Method, 0}, + {"(Name).String", Method, 10}, + {"(Name).ToRDNSequence", Method, 0}, + {"(RDNSequence).String", Method, 10}, + {"AlgorithmIdentifier", Type, 0}, + {"AlgorithmIdentifier.Algorithm", Field, 0}, + {"AlgorithmIdentifier.Parameters", Field, 0}, + {"AttributeTypeAndValue", Type, 0}, + {"AttributeTypeAndValue.Type", Field, 0}, + {"AttributeTypeAndValue.Value", Field, 0}, + {"AttributeTypeAndValueSET", Type, 3}, + {"AttributeTypeAndValueSET.Type", Field, 3}, + {"AttributeTypeAndValueSET.Value", Field, 3}, + {"CertificateList", Type, 0}, + {"CertificateList.SignatureAlgorithm", Field, 0}, + {"CertificateList.SignatureValue", Field, 0}, + {"CertificateList.TBSCertList", Field, 0}, + {"Extension", Type, 0}, + {"Extension.Critical", Field, 0}, + {"Extension.Id", Field, 0}, + {"Extension.Value", Field, 0}, + {"Name", Type, 0}, + {"Name.CommonName", Field, 0}, + {"Name.Country", Field, 0}, + {"Name.ExtraNames", Field, 5}, + {"Name.Locality", Field, 0}, + {"Name.Names", Field, 0}, + {"Name.Organization", Field, 0}, + {"Name.OrganizationalUnit", Field, 0}, + {"Name.PostalCode", Field, 0}, + {"Name.Province", Field, 0}, + {"Name.SerialNumber", Field, 0}, + {"Name.StreetAddress", Field, 0}, + {"RDNSequence", Type, 0}, + {"RelativeDistinguishedNameSET", Type, 0}, + {"RevokedCertificate", Type, 0}, + {"RevokedCertificate.Extensions", Field, 0}, + {"RevokedCertificate.RevocationTime", Field, 0}, + {"RevokedCertificate.SerialNumber", Field, 0}, + {"TBSCertificateList", Type, 0}, + {"TBSCertificateList.Extensions", Field, 0}, + {"TBSCertificateList.Issuer", Field, 0}, + {"TBSCertificateList.NextUpdate", Field, 0}, + {"TBSCertificateList.Raw", Field, 0}, + {"TBSCertificateList.RevokedCertificates", Field, 0}, + {"TBSCertificateList.Signature", Field, 0}, + {"TBSCertificateList.ThisUpdate", Field, 0}, + {"TBSCertificateList.Version", Field, 0}, + }, + "database/sql": { + {"(*ColumnType).DatabaseTypeName", Method, 8}, + {"(*ColumnType).DecimalSize", Method, 8}, + {"(*ColumnType).Length", Method, 8}, + {"(*ColumnType).Name", Method, 8}, + {"(*ColumnType).Nullable", Method, 8}, + {"(*ColumnType).ScanType", Method, 8}, + {"(*Conn).BeginTx", Method, 9}, + {"(*Conn).Close", Method, 9}, + {"(*Conn).ExecContext", Method, 9}, + {"(*Conn).PingContext", Method, 9}, + {"(*Conn).PrepareContext", Method, 9}, + {"(*Conn).QueryContext", Method, 9}, + {"(*Conn).QueryRowContext", Method, 9}, + {"(*Conn).Raw", Method, 13}, + 
{"(*DB).Begin", Method, 0}, + {"(*DB).BeginTx", Method, 8}, + {"(*DB).Close", Method, 0}, + {"(*DB).Conn", Method, 9}, + {"(*DB).Driver", Method, 0}, + {"(*DB).Exec", Method, 0}, + {"(*DB).ExecContext", Method, 8}, + {"(*DB).Ping", Method, 1}, + {"(*DB).PingContext", Method, 8}, + {"(*DB).Prepare", Method, 0}, + {"(*DB).PrepareContext", Method, 8}, + {"(*DB).Query", Method, 0}, + {"(*DB).QueryContext", Method, 8}, + {"(*DB).QueryRow", Method, 0}, + {"(*DB).QueryRowContext", Method, 8}, + {"(*DB).SetConnMaxIdleTime", Method, 15}, + {"(*DB).SetConnMaxLifetime", Method, 6}, + {"(*DB).SetMaxIdleConns", Method, 1}, + {"(*DB).SetMaxOpenConns", Method, 2}, + {"(*DB).Stats", Method, 5}, + {"(*Null).Scan", Method, 22}, + {"(*NullBool).Scan", Method, 0}, + {"(*NullByte).Scan", Method, 17}, + {"(*NullFloat64).Scan", Method, 0}, + {"(*NullInt16).Scan", Method, 17}, + {"(*NullInt32).Scan", Method, 13}, + {"(*NullInt64).Scan", Method, 0}, + {"(*NullString).Scan", Method, 0}, + {"(*NullTime).Scan", Method, 13}, + {"(*Row).Err", Method, 15}, + {"(*Row).Scan", Method, 0}, + {"(*Rows).Close", Method, 0}, + {"(*Rows).ColumnTypes", Method, 8}, + {"(*Rows).Columns", Method, 0}, + {"(*Rows).Err", Method, 0}, + {"(*Rows).Next", Method, 0}, + {"(*Rows).NextResultSet", Method, 8}, + {"(*Rows).Scan", Method, 0}, + {"(*Stmt).Close", Method, 0}, + {"(*Stmt).Exec", Method, 0}, + {"(*Stmt).ExecContext", Method, 8}, + {"(*Stmt).Query", Method, 0}, + {"(*Stmt).QueryContext", Method, 8}, + {"(*Stmt).QueryRow", Method, 0}, + {"(*Stmt).QueryRowContext", Method, 8}, + {"(*Tx).Commit", Method, 0}, + {"(*Tx).Exec", Method, 0}, + {"(*Tx).ExecContext", Method, 8}, + {"(*Tx).Prepare", Method, 0}, + {"(*Tx).PrepareContext", Method, 8}, + {"(*Tx).Query", Method, 0}, + {"(*Tx).QueryContext", Method, 8}, + {"(*Tx).QueryRow", Method, 0}, + {"(*Tx).QueryRowContext", Method, 8}, + {"(*Tx).Rollback", Method, 0}, + {"(*Tx).Stmt", Method, 0}, + {"(*Tx).StmtContext", Method, 8}, + {"(IsolationLevel).String", Method, 11}, + {"(Null).Value", Method, 22}, + {"(NullBool).Value", Method, 0}, + {"(NullByte).Value", Method, 17}, + {"(NullFloat64).Value", Method, 0}, + {"(NullInt16).Value", Method, 17}, + {"(NullInt32).Value", Method, 13}, + {"(NullInt64).Value", Method, 0}, + {"(NullString).Value", Method, 0}, + {"(NullTime).Value", Method, 13}, + {"ColumnType", Type, 8}, + {"Conn", Type, 9}, + {"DB", Type, 0}, + {"DBStats", Type, 5}, + {"DBStats.Idle", Field, 11}, + {"DBStats.InUse", Field, 11}, + {"DBStats.MaxIdleClosed", Field, 11}, + {"DBStats.MaxIdleTimeClosed", Field, 15}, + {"DBStats.MaxLifetimeClosed", Field, 11}, + {"DBStats.MaxOpenConnections", Field, 11}, + {"DBStats.OpenConnections", Field, 5}, + {"DBStats.WaitCount", Field, 11}, + {"DBStats.WaitDuration", Field, 11}, + {"Drivers", Func, 4}, + {"ErrConnDone", Var, 9}, + {"ErrNoRows", Var, 0}, + {"ErrTxDone", Var, 0}, + {"IsolationLevel", Type, 8}, + {"LevelDefault", Const, 8}, + {"LevelLinearizable", Const, 8}, + {"LevelReadCommitted", Const, 8}, + {"LevelReadUncommitted", Const, 8}, + {"LevelRepeatableRead", Const, 8}, + {"LevelSerializable", Const, 8}, + {"LevelSnapshot", Const, 8}, + {"LevelWriteCommitted", Const, 8}, + {"Named", Func, 8}, + {"NamedArg", Type, 8}, + {"NamedArg.Name", Field, 8}, + {"NamedArg.Value", Field, 8}, + {"Null", Type, 22}, + {"Null.V", Field, 22}, + {"Null.Valid", Field, 22}, + {"NullBool", Type, 0}, + {"NullBool.Bool", Field, 0}, + {"NullBool.Valid", Field, 0}, + {"NullByte", Type, 17}, + {"NullByte.Byte", Field, 17}, + {"NullByte.Valid", Field, 17}, + 
{"NullFloat64", Type, 0}, + {"NullFloat64.Float64", Field, 0}, + {"NullFloat64.Valid", Field, 0}, + {"NullInt16", Type, 17}, + {"NullInt16.Int16", Field, 17}, + {"NullInt16.Valid", Field, 17}, + {"NullInt32", Type, 13}, + {"NullInt32.Int32", Field, 13}, + {"NullInt32.Valid", Field, 13}, + {"NullInt64", Type, 0}, + {"NullInt64.Int64", Field, 0}, + {"NullInt64.Valid", Field, 0}, + {"NullString", Type, 0}, + {"NullString.String", Field, 0}, + {"NullString.Valid", Field, 0}, + {"NullTime", Type, 13}, + {"NullTime.Time", Field, 13}, + {"NullTime.Valid", Field, 13}, + {"Open", Func, 0}, + {"OpenDB", Func, 10}, + {"Out", Type, 9}, + {"Out.Dest", Field, 9}, + {"Out.In", Field, 9}, + {"RawBytes", Type, 0}, + {"Register", Func, 0}, + {"Result", Type, 0}, + {"Row", Type, 0}, + {"Rows", Type, 0}, + {"Scanner", Type, 0}, + {"Stmt", Type, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + }, + "database/sql/driver": { + {"(NotNull).ConvertValue", Method, 0}, + {"(Null).ConvertValue", Method, 0}, + {"(RowsAffected).LastInsertId", Method, 0}, + {"(RowsAffected).RowsAffected", Method, 0}, + {"Bool", Var, 0}, + {"ColumnConverter", Type, 0}, + {"Conn", Type, 0}, + {"ConnBeginTx", Type, 8}, + {"ConnPrepareContext", Type, 8}, + {"Connector", Type, 10}, + {"DefaultParameterConverter", Var, 0}, + {"Driver", Type, 0}, + {"DriverContext", Type, 10}, + {"ErrBadConn", Var, 0}, + {"ErrRemoveArgument", Var, 9}, + {"ErrSkip", Var, 0}, + {"Execer", Type, 0}, + {"ExecerContext", Type, 8}, + {"Int32", Var, 0}, + {"IsScanValue", Func, 0}, + {"IsValue", Func, 0}, + {"IsolationLevel", Type, 8}, + {"NamedValue", Type, 8}, + {"NamedValue.Name", Field, 8}, + {"NamedValue.Ordinal", Field, 8}, + {"NamedValue.Value", Field, 8}, + {"NamedValueChecker", Type, 9}, + {"NotNull", Type, 0}, + {"NotNull.Converter", Field, 0}, + {"Null", Type, 0}, + {"Null.Converter", Field, 0}, + {"Pinger", Type, 8}, + {"Queryer", Type, 1}, + {"QueryerContext", Type, 8}, + {"Result", Type, 0}, + {"ResultNoRows", Var, 0}, + {"Rows", Type, 0}, + {"RowsAffected", Type, 0}, + {"RowsColumnTypeDatabaseTypeName", Type, 8}, + {"RowsColumnTypeLength", Type, 8}, + {"RowsColumnTypeNullable", Type, 8}, + {"RowsColumnTypePrecisionScale", Type, 8}, + {"RowsColumnTypeScanType", Type, 8}, + {"RowsNextResultSet", Type, 8}, + {"SessionResetter", Type, 10}, + {"Stmt", Type, 0}, + {"StmtExecContext", Type, 8}, + {"StmtQueryContext", Type, 8}, + {"String", Var, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + {"Validator", Type, 15}, + {"Value", Type, 0}, + {"ValueConverter", Type, 0}, + {"Valuer", Type, 0}, + }, + "debug/buildinfo": { + {"BuildInfo", Type, 18}, + {"Read", Func, 18}, + {"ReadFile", Func, 18}, + }, + "debug/dwarf": { + {"(*AddrType).Basic", Method, 0}, + {"(*AddrType).Common", Method, 0}, + {"(*AddrType).Size", Method, 0}, + {"(*AddrType).String", Method, 0}, + {"(*ArrayType).Common", Method, 0}, + {"(*ArrayType).Size", Method, 0}, + {"(*ArrayType).String", Method, 0}, + {"(*BasicType).Basic", Method, 0}, + {"(*BasicType).Common", Method, 0}, + {"(*BasicType).Size", Method, 0}, + {"(*BasicType).String", Method, 0}, + {"(*BoolType).Basic", Method, 0}, + {"(*BoolType).Common", Method, 0}, + {"(*BoolType).Size", Method, 0}, + {"(*BoolType).String", Method, 0}, + {"(*CharType).Basic", Method, 0}, + {"(*CharType).Common", Method, 0}, + {"(*CharType).Size", Method, 0}, + {"(*CharType).String", Method, 0}, + 
{"(*CommonType).Common", Method, 0}, + {"(*CommonType).Size", Method, 0}, + {"(*ComplexType).Basic", Method, 0}, + {"(*ComplexType).Common", Method, 0}, + {"(*ComplexType).Size", Method, 0}, + {"(*ComplexType).String", Method, 0}, + {"(*Data).AddSection", Method, 14}, + {"(*Data).AddTypes", Method, 3}, + {"(*Data).LineReader", Method, 5}, + {"(*Data).Ranges", Method, 7}, + {"(*Data).Reader", Method, 0}, + {"(*Data).Type", Method, 0}, + {"(*DotDotDotType).Common", Method, 0}, + {"(*DotDotDotType).Size", Method, 0}, + {"(*DotDotDotType).String", Method, 0}, + {"(*Entry).AttrField", Method, 5}, + {"(*Entry).Val", Method, 0}, + {"(*EnumType).Common", Method, 0}, + {"(*EnumType).Size", Method, 0}, + {"(*EnumType).String", Method, 0}, + {"(*FloatType).Basic", Method, 0}, + {"(*FloatType).Common", Method, 0}, + {"(*FloatType).Size", Method, 0}, + {"(*FloatType).String", Method, 0}, + {"(*FuncType).Common", Method, 0}, + {"(*FuncType).Size", Method, 0}, + {"(*FuncType).String", Method, 0}, + {"(*IntType).Basic", Method, 0}, + {"(*IntType).Common", Method, 0}, + {"(*IntType).Size", Method, 0}, + {"(*IntType).String", Method, 0}, + {"(*LineReader).Files", Method, 14}, + {"(*LineReader).Next", Method, 5}, + {"(*LineReader).Reset", Method, 5}, + {"(*LineReader).Seek", Method, 5}, + {"(*LineReader).SeekPC", Method, 5}, + {"(*LineReader).Tell", Method, 5}, + {"(*PtrType).Common", Method, 0}, + {"(*PtrType).Size", Method, 0}, + {"(*PtrType).String", Method, 0}, + {"(*QualType).Common", Method, 0}, + {"(*QualType).Size", Method, 0}, + {"(*QualType).String", Method, 0}, + {"(*Reader).AddressSize", Method, 5}, + {"(*Reader).ByteOrder", Method, 14}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).SeekPC", Method, 7}, + {"(*Reader).SkipChildren", Method, 0}, + {"(*StructType).Common", Method, 0}, + {"(*StructType).Defn", Method, 0}, + {"(*StructType).Size", Method, 0}, + {"(*StructType).String", Method, 0}, + {"(*TypedefType).Common", Method, 0}, + {"(*TypedefType).Size", Method, 0}, + {"(*TypedefType).String", Method, 0}, + {"(*UcharType).Basic", Method, 0}, + {"(*UcharType).Common", Method, 0}, + {"(*UcharType).Size", Method, 0}, + {"(*UcharType).String", Method, 0}, + {"(*UintType).Basic", Method, 0}, + {"(*UintType).Common", Method, 0}, + {"(*UintType).Size", Method, 0}, + {"(*UintType).String", Method, 0}, + {"(*UnspecifiedType).Basic", Method, 4}, + {"(*UnspecifiedType).Common", Method, 4}, + {"(*UnspecifiedType).Size", Method, 4}, + {"(*UnspecifiedType).String", Method, 4}, + {"(*UnsupportedType).Common", Method, 13}, + {"(*UnsupportedType).Size", Method, 13}, + {"(*UnsupportedType).String", Method, 13}, + {"(*VoidType).Common", Method, 0}, + {"(*VoidType).Size", Method, 0}, + {"(*VoidType).String", Method, 0}, + {"(Attr).GoString", Method, 0}, + {"(Attr).String", Method, 0}, + {"(Class).GoString", Method, 5}, + {"(Class).String", Method, 5}, + {"(DecodeError).Error", Method, 0}, + {"(Tag).GoString", Method, 0}, + {"(Tag).String", Method, 0}, + {"AddrType", Type, 0}, + {"AddrType.BasicType", Field, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.CommonType", Field, 0}, + {"ArrayType.Count", Field, 0}, + {"ArrayType.StrideBitSize", Field, 0}, + {"ArrayType.Type", Field, 0}, + {"Attr", Type, 0}, + {"AttrAbstractOrigin", Const, 0}, + {"AttrAccessibility", Const, 0}, + {"AttrAddrBase", Const, 14}, + {"AttrAddrClass", Const, 0}, + {"AttrAlignment", Const, 14}, + {"AttrAllocated", Const, 0}, + {"AttrArtificial", Const, 0}, + {"AttrAssociated", Const, 0}, + {"AttrBaseTypes", Const, 
0}, + {"AttrBinaryScale", Const, 14}, + {"AttrBitOffset", Const, 0}, + {"AttrBitSize", Const, 0}, + {"AttrByteSize", Const, 0}, + {"AttrCallAllCalls", Const, 14}, + {"AttrCallAllSourceCalls", Const, 14}, + {"AttrCallAllTailCalls", Const, 14}, + {"AttrCallColumn", Const, 0}, + {"AttrCallDataLocation", Const, 14}, + {"AttrCallDataValue", Const, 14}, + {"AttrCallFile", Const, 0}, + {"AttrCallLine", Const, 0}, + {"AttrCallOrigin", Const, 14}, + {"AttrCallPC", Const, 14}, + {"AttrCallParameter", Const, 14}, + {"AttrCallReturnPC", Const, 14}, + {"AttrCallTailCall", Const, 14}, + {"AttrCallTarget", Const, 14}, + {"AttrCallTargetClobbered", Const, 14}, + {"AttrCallValue", Const, 14}, + {"AttrCalling", Const, 0}, + {"AttrCommonRef", Const, 0}, + {"AttrCompDir", Const, 0}, + {"AttrConstExpr", Const, 14}, + {"AttrConstValue", Const, 0}, + {"AttrContainingType", Const, 0}, + {"AttrCount", Const, 0}, + {"AttrDataBitOffset", Const, 14}, + {"AttrDataLocation", Const, 0}, + {"AttrDataMemberLoc", Const, 0}, + {"AttrDecimalScale", Const, 14}, + {"AttrDecimalSign", Const, 14}, + {"AttrDeclColumn", Const, 0}, + {"AttrDeclFile", Const, 0}, + {"AttrDeclLine", Const, 0}, + {"AttrDeclaration", Const, 0}, + {"AttrDefaultValue", Const, 0}, + {"AttrDefaulted", Const, 14}, + {"AttrDeleted", Const, 14}, + {"AttrDescription", Const, 0}, + {"AttrDigitCount", Const, 14}, + {"AttrDiscr", Const, 0}, + {"AttrDiscrList", Const, 0}, + {"AttrDiscrValue", Const, 0}, + {"AttrDwoName", Const, 14}, + {"AttrElemental", Const, 14}, + {"AttrEncoding", Const, 0}, + {"AttrEndianity", Const, 14}, + {"AttrEntrypc", Const, 0}, + {"AttrEnumClass", Const, 14}, + {"AttrExplicit", Const, 14}, + {"AttrExportSymbols", Const, 14}, + {"AttrExtension", Const, 0}, + {"AttrExternal", Const, 0}, + {"AttrFrameBase", Const, 0}, + {"AttrFriend", Const, 0}, + {"AttrHighpc", Const, 0}, + {"AttrIdentifierCase", Const, 0}, + {"AttrImport", Const, 0}, + {"AttrInline", Const, 0}, + {"AttrIsOptional", Const, 0}, + {"AttrLanguage", Const, 0}, + {"AttrLinkageName", Const, 14}, + {"AttrLocation", Const, 0}, + {"AttrLoclistsBase", Const, 14}, + {"AttrLowerBound", Const, 0}, + {"AttrLowpc", Const, 0}, + {"AttrMacroInfo", Const, 0}, + {"AttrMacros", Const, 14}, + {"AttrMainSubprogram", Const, 14}, + {"AttrMutable", Const, 14}, + {"AttrName", Const, 0}, + {"AttrNamelistItem", Const, 0}, + {"AttrNoreturn", Const, 14}, + {"AttrObjectPointer", Const, 14}, + {"AttrOrdering", Const, 0}, + {"AttrPictureString", Const, 14}, + {"AttrPriority", Const, 0}, + {"AttrProducer", Const, 0}, + {"AttrPrototyped", Const, 0}, + {"AttrPure", Const, 14}, + {"AttrRanges", Const, 0}, + {"AttrRank", Const, 14}, + {"AttrRecursive", Const, 14}, + {"AttrReference", Const, 14}, + {"AttrReturnAddr", Const, 0}, + {"AttrRnglistsBase", Const, 14}, + {"AttrRvalueReference", Const, 14}, + {"AttrSegment", Const, 0}, + {"AttrSibling", Const, 0}, + {"AttrSignature", Const, 14}, + {"AttrSmall", Const, 14}, + {"AttrSpecification", Const, 0}, + {"AttrStartScope", Const, 0}, + {"AttrStaticLink", Const, 0}, + {"AttrStmtList", Const, 0}, + {"AttrStrOffsetsBase", Const, 14}, + {"AttrStride", Const, 0}, + {"AttrStrideSize", Const, 0}, + {"AttrStringLength", Const, 0}, + {"AttrStringLengthBitSize", Const, 14}, + {"AttrStringLengthByteSize", Const, 14}, + {"AttrThreadsScaled", Const, 14}, + {"AttrTrampoline", Const, 0}, + {"AttrType", Const, 0}, + {"AttrUpperBound", Const, 0}, + {"AttrUseLocation", Const, 0}, + {"AttrUseUTF8", Const, 0}, + {"AttrVarParam", Const, 0}, + {"AttrVirtuality", Const, 0}, + 
{"AttrVisibility", Const, 0}, + {"AttrVtableElemLoc", Const, 0}, + {"BasicType", Type, 0}, + {"BasicType.BitOffset", Field, 0}, + {"BasicType.BitSize", Field, 0}, + {"BasicType.CommonType", Field, 0}, + {"BasicType.DataBitOffset", Field, 18}, + {"BoolType", Type, 0}, + {"BoolType.BasicType", Field, 0}, + {"CharType", Type, 0}, + {"CharType.BasicType", Field, 0}, + {"Class", Type, 5}, + {"ClassAddrPtr", Const, 14}, + {"ClassAddress", Const, 5}, + {"ClassBlock", Const, 5}, + {"ClassConstant", Const, 5}, + {"ClassExprLoc", Const, 5}, + {"ClassFlag", Const, 5}, + {"ClassLinePtr", Const, 5}, + {"ClassLocList", Const, 14}, + {"ClassLocListPtr", Const, 5}, + {"ClassMacPtr", Const, 5}, + {"ClassRangeListPtr", Const, 5}, + {"ClassReference", Const, 5}, + {"ClassReferenceAlt", Const, 5}, + {"ClassReferenceSig", Const, 5}, + {"ClassRngList", Const, 14}, + {"ClassRngListsPtr", Const, 14}, + {"ClassStrOffsetsPtr", Const, 14}, + {"ClassString", Const, 5}, + {"ClassStringAlt", Const, 5}, + {"ClassUnknown", Const, 6}, + {"CommonType", Type, 0}, + {"CommonType.ByteSize", Field, 0}, + {"CommonType.Name", Field, 0}, + {"ComplexType", Type, 0}, + {"ComplexType.BasicType", Field, 0}, + {"Data", Type, 0}, + {"DecodeError", Type, 0}, + {"DecodeError.Err", Field, 0}, + {"DecodeError.Name", Field, 0}, + {"DecodeError.Offset", Field, 0}, + {"DotDotDotType", Type, 0}, + {"DotDotDotType.CommonType", Field, 0}, + {"Entry", Type, 0}, + {"Entry.Children", Field, 0}, + {"Entry.Field", Field, 0}, + {"Entry.Offset", Field, 0}, + {"Entry.Tag", Field, 0}, + {"EnumType", Type, 0}, + {"EnumType.CommonType", Field, 0}, + {"EnumType.EnumName", Field, 0}, + {"EnumType.Val", Field, 0}, + {"EnumValue", Type, 0}, + {"EnumValue.Name", Field, 0}, + {"EnumValue.Val", Field, 0}, + {"ErrUnknownPC", Var, 5}, + {"Field", Type, 0}, + {"Field.Attr", Field, 0}, + {"Field.Class", Field, 5}, + {"Field.Val", Field, 0}, + {"FloatType", Type, 0}, + {"FloatType.BasicType", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.CommonType", Field, 0}, + {"FuncType.ParamType", Field, 0}, + {"FuncType.ReturnType", Field, 0}, + {"IntType", Type, 0}, + {"IntType.BasicType", Field, 0}, + {"LineEntry", Type, 5}, + {"LineEntry.Address", Field, 5}, + {"LineEntry.BasicBlock", Field, 5}, + {"LineEntry.Column", Field, 5}, + {"LineEntry.Discriminator", Field, 5}, + {"LineEntry.EndSequence", Field, 5}, + {"LineEntry.EpilogueBegin", Field, 5}, + {"LineEntry.File", Field, 5}, + {"LineEntry.ISA", Field, 5}, + {"LineEntry.IsStmt", Field, 5}, + {"LineEntry.Line", Field, 5}, + {"LineEntry.OpIndex", Field, 5}, + {"LineEntry.PrologueEnd", Field, 5}, + {"LineFile", Type, 5}, + {"LineFile.Length", Field, 5}, + {"LineFile.Mtime", Field, 5}, + {"LineFile.Name", Field, 5}, + {"LineReader", Type, 5}, + {"LineReaderPos", Type, 5}, + {"New", Func, 0}, + {"Offset", Type, 0}, + {"PtrType", Type, 0}, + {"PtrType.CommonType", Field, 0}, + {"PtrType.Type", Field, 0}, + {"QualType", Type, 0}, + {"QualType.CommonType", Field, 0}, + {"QualType.Qual", Field, 0}, + {"QualType.Type", Field, 0}, + {"Reader", Type, 0}, + {"StructField", Type, 0}, + {"StructField.BitOffset", Field, 0}, + {"StructField.BitSize", Field, 0}, + {"StructField.ByteOffset", Field, 0}, + {"StructField.ByteSize", Field, 0}, + {"StructField.DataBitOffset", Field, 18}, + {"StructField.Name", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructType", Type, 0}, + {"StructType.CommonType", Field, 0}, + {"StructType.Field", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Kind", Field, 0}, + 
{"StructType.StructName", Field, 0}, + {"Tag", Type, 0}, + {"TagAccessDeclaration", Const, 0}, + {"TagArrayType", Const, 0}, + {"TagAtomicType", Const, 14}, + {"TagBaseType", Const, 0}, + {"TagCallSite", Const, 14}, + {"TagCallSiteParameter", Const, 14}, + {"TagCatchDwarfBlock", Const, 0}, + {"TagClassType", Const, 0}, + {"TagCoarrayType", Const, 14}, + {"TagCommonDwarfBlock", Const, 0}, + {"TagCommonInclusion", Const, 0}, + {"TagCompileUnit", Const, 0}, + {"TagCondition", Const, 3}, + {"TagConstType", Const, 0}, + {"TagConstant", Const, 0}, + {"TagDwarfProcedure", Const, 0}, + {"TagDynamicType", Const, 14}, + {"TagEntryPoint", Const, 0}, + {"TagEnumerationType", Const, 0}, + {"TagEnumerator", Const, 0}, + {"TagFileType", Const, 0}, + {"TagFormalParameter", Const, 0}, + {"TagFriend", Const, 0}, + {"TagGenericSubrange", Const, 14}, + {"TagImmutableType", Const, 14}, + {"TagImportedDeclaration", Const, 0}, + {"TagImportedModule", Const, 0}, + {"TagImportedUnit", Const, 0}, + {"TagInheritance", Const, 0}, + {"TagInlinedSubroutine", Const, 0}, + {"TagInterfaceType", Const, 0}, + {"TagLabel", Const, 0}, + {"TagLexDwarfBlock", Const, 0}, + {"TagMember", Const, 0}, + {"TagModule", Const, 0}, + {"TagMutableType", Const, 0}, + {"TagNamelist", Const, 0}, + {"TagNamelistItem", Const, 0}, + {"TagNamespace", Const, 0}, + {"TagPackedType", Const, 0}, + {"TagPartialUnit", Const, 0}, + {"TagPointerType", Const, 0}, + {"TagPtrToMemberType", Const, 0}, + {"TagReferenceType", Const, 0}, + {"TagRestrictType", Const, 0}, + {"TagRvalueReferenceType", Const, 3}, + {"TagSetType", Const, 0}, + {"TagSharedType", Const, 3}, + {"TagSkeletonUnit", Const, 14}, + {"TagStringType", Const, 0}, + {"TagStructType", Const, 0}, + {"TagSubprogram", Const, 0}, + {"TagSubrangeType", Const, 0}, + {"TagSubroutineType", Const, 0}, + {"TagTemplateAlias", Const, 3}, + {"TagTemplateTypeParameter", Const, 0}, + {"TagTemplateValueParameter", Const, 0}, + {"TagThrownType", Const, 0}, + {"TagTryDwarfBlock", Const, 0}, + {"TagTypeUnit", Const, 3}, + {"TagTypedef", Const, 0}, + {"TagUnionType", Const, 0}, + {"TagUnspecifiedParameters", Const, 0}, + {"TagUnspecifiedType", Const, 0}, + {"TagVariable", Const, 0}, + {"TagVariant", Const, 0}, + {"TagVariantPart", Const, 0}, + {"TagVolatileType", Const, 0}, + {"TagWithStmt", Const, 0}, + {"Type", Type, 0}, + {"TypedefType", Type, 0}, + {"TypedefType.CommonType", Field, 0}, + {"TypedefType.Type", Field, 0}, + {"UcharType", Type, 0}, + {"UcharType.BasicType", Field, 0}, + {"UintType", Type, 0}, + {"UintType.BasicType", Field, 0}, + {"UnspecifiedType", Type, 4}, + {"UnspecifiedType.BasicType", Field, 4}, + {"UnsupportedType", Type, 13}, + {"UnsupportedType.CommonType", Field, 13}, + {"UnsupportedType.Tag", Field, 13}, + {"VoidType", Type, 0}, + {"VoidType.CommonType", Field, 0}, + }, + "debug/elf": { + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).DynString", Method, 1}, + {"(*File).DynValue", Method, 21}, + {"(*File).DynamicSymbols", Method, 4}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).SectionByType", Method, 0}, + {"(*File).Symbols", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Prog).Open", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Class).GoString", Method, 0}, + {"(Class).String", Method, 0}, + {"(CompressionType).GoString", Method, 6}, + {"(CompressionType).String", Method, 6}, + {"(Data).GoString", Method, 0}, + 
{"(Data).String", Method, 0}, + {"(DynFlag).GoString", Method, 0}, + {"(DynFlag).String", Method, 0}, + {"(DynFlag1).GoString", Method, 21}, + {"(DynFlag1).String", Method, 21}, + {"(DynTag).GoString", Method, 0}, + {"(DynTag).String", Method, 0}, + {"(Machine).GoString", Method, 0}, + {"(Machine).String", Method, 0}, + {"(NType).GoString", Method, 0}, + {"(NType).String", Method, 0}, + {"(OSABI).GoString", Method, 0}, + {"(OSABI).String", Method, 0}, + {"(Prog).ReadAt", Method, 0}, + {"(ProgFlag).GoString", Method, 0}, + {"(ProgFlag).String", Method, 0}, + {"(ProgType).GoString", Method, 0}, + {"(ProgType).String", Method, 0}, + {"(R_386).GoString", Method, 0}, + {"(R_386).String", Method, 0}, + {"(R_390).GoString", Method, 7}, + {"(R_390).String", Method, 7}, + {"(R_AARCH64).GoString", Method, 4}, + {"(R_AARCH64).String", Method, 4}, + {"(R_ALPHA).GoString", Method, 0}, + {"(R_ALPHA).String", Method, 0}, + {"(R_ARM).GoString", Method, 0}, + {"(R_ARM).String", Method, 0}, + {"(R_LARCH).GoString", Method, 19}, + {"(R_LARCH).String", Method, 19}, + {"(R_MIPS).GoString", Method, 6}, + {"(R_MIPS).String", Method, 6}, + {"(R_PPC).GoString", Method, 0}, + {"(R_PPC).String", Method, 0}, + {"(R_PPC64).GoString", Method, 5}, + {"(R_PPC64).String", Method, 5}, + {"(R_RISCV).GoString", Method, 11}, + {"(R_RISCV).String", Method, 11}, + {"(R_SPARC).GoString", Method, 0}, + {"(R_SPARC).String", Method, 0}, + {"(R_X86_64).GoString", Method, 0}, + {"(R_X86_64).String", Method, 0}, + {"(Section).ReadAt", Method, 0}, + {"(SectionFlag).GoString", Method, 0}, + {"(SectionFlag).String", Method, 0}, + {"(SectionIndex).GoString", Method, 0}, + {"(SectionIndex).String", Method, 0}, + {"(SectionType).GoString", Method, 0}, + {"(SectionType).String", Method, 0}, + {"(SymBind).GoString", Method, 0}, + {"(SymBind).String", Method, 0}, + {"(SymType).GoString", Method, 0}, + {"(SymType).String", Method, 0}, + {"(SymVis).GoString", Method, 0}, + {"(SymVis).String", Method, 0}, + {"(Type).GoString", Method, 0}, + {"(Type).String", Method, 0}, + {"(Version).GoString", Method, 0}, + {"(Version).String", Method, 0}, + {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, + {"COMPRESS_HIOS", Const, 6}, + {"COMPRESS_HIPROC", Const, 6}, + {"COMPRESS_LOOS", Const, 6}, + {"COMPRESS_LOPROC", Const, 6}, + {"COMPRESS_ZLIB", Const, 6}, + {"COMPRESS_ZSTD", Const, 21}, + {"Chdr32", Type, 6}, + {"Chdr32.Addralign", Field, 6}, + {"Chdr32.Size", Field, 6}, + {"Chdr32.Type", Field, 6}, + {"Chdr64", Type, 6}, + {"Chdr64.Addralign", Field, 6}, + {"Chdr64.Size", Field, 6}, + {"Chdr64.Type", Field, 6}, + {"Class", Type, 0}, + {"CompressionType", Type, 6}, + {"DF_1_CONFALT", Const, 21}, + {"DF_1_DIRECT", Const, 21}, + {"DF_1_DISPRELDNE", Const, 21}, + {"DF_1_DISPRELPND", Const, 21}, + {"DF_1_EDITED", Const, 21}, + {"DF_1_ENDFILTEE", Const, 21}, + {"DF_1_GLOBAL", Const, 21}, + {"DF_1_GLOBAUDIT", Const, 21}, + {"DF_1_GROUP", Const, 21}, + {"DF_1_IGNMULDEF", Const, 21}, + {"DF_1_INITFIRST", Const, 21}, + {"DF_1_INTERPOSE", Const, 21}, + {"DF_1_KMOD", Const, 21}, + {"DF_1_LOADFLTR", Const, 21}, + {"DF_1_NOCOMMON", Const, 21}, + {"DF_1_NODEFLIB", Const, 21}, + {"DF_1_NODELETE", Const, 21}, + {"DF_1_NODIRECT", Const, 21}, + {"DF_1_NODUMP", Const, 21}, + {"DF_1_NOHDR", Const, 21}, + {"DF_1_NOKSYMS", Const, 21}, + {"DF_1_NOOPEN", Const, 21}, + {"DF_1_NORELOC", Const, 21}, + {"DF_1_NOW", Const, 21}, + {"DF_1_ORIGIN", Const, 21}, + {"DF_1_PIE", Const, 21}, + {"DF_1_SINGLETON", Const, 21}, + {"DF_1_STUB", Const, 21}, + {"DF_1_SYMINTPOSE", Const, 21}, + {"DF_1_TRANS", 
Const, 21}, + {"DF_1_WEAKFILTER", Const, 21}, + {"DF_BIND_NOW", Const, 0}, + {"DF_ORIGIN", Const, 0}, + {"DF_STATIC_TLS", Const, 0}, + {"DF_SYMBOLIC", Const, 0}, + {"DF_TEXTREL", Const, 0}, + {"DT_ADDRRNGHI", Const, 16}, + {"DT_ADDRRNGLO", Const, 16}, + {"DT_AUDIT", Const, 16}, + {"DT_AUXILIARY", Const, 16}, + {"DT_BIND_NOW", Const, 0}, + {"DT_CHECKSUM", Const, 16}, + {"DT_CONFIG", Const, 16}, + {"DT_DEBUG", Const, 0}, + {"DT_DEPAUDIT", Const, 16}, + {"DT_ENCODING", Const, 0}, + {"DT_FEATURE", Const, 16}, + {"DT_FILTER", Const, 16}, + {"DT_FINI", Const, 0}, + {"DT_FINI_ARRAY", Const, 0}, + {"DT_FINI_ARRAYSZ", Const, 0}, + {"DT_FLAGS", Const, 0}, + {"DT_FLAGS_1", Const, 16}, + {"DT_GNU_CONFLICT", Const, 16}, + {"DT_GNU_CONFLICTSZ", Const, 16}, + {"DT_GNU_HASH", Const, 16}, + {"DT_GNU_LIBLIST", Const, 16}, + {"DT_GNU_LIBLISTSZ", Const, 16}, + {"DT_GNU_PRELINKED", Const, 16}, + {"DT_HASH", Const, 0}, + {"DT_HIOS", Const, 0}, + {"DT_HIPROC", Const, 0}, + {"DT_INIT", Const, 0}, + {"DT_INIT_ARRAY", Const, 0}, + {"DT_INIT_ARRAYSZ", Const, 0}, + {"DT_JMPREL", Const, 0}, + {"DT_LOOS", Const, 0}, + {"DT_LOPROC", Const, 0}, + {"DT_MIPS_AUX_DYNAMIC", Const, 16}, + {"DT_MIPS_BASE_ADDRESS", Const, 16}, + {"DT_MIPS_COMPACT_SIZE", Const, 16}, + {"DT_MIPS_CONFLICT", Const, 16}, + {"DT_MIPS_CONFLICTNO", Const, 16}, + {"DT_MIPS_CXX_FLAGS", Const, 16}, + {"DT_MIPS_DELTA_CLASS", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16}, + {"DT_MIPS_DELTA_CLASS_NO", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16}, + {"DT_MIPS_DELTA_RELOC", Const, 16}, + {"DT_MIPS_DELTA_RELOC_NO", Const, 16}, + {"DT_MIPS_DELTA_SYM", Const, 16}, + {"DT_MIPS_DELTA_SYM_NO", Const, 16}, + {"DT_MIPS_DYNSTR_ALIGN", Const, 16}, + {"DT_MIPS_FLAGS", Const, 16}, + {"DT_MIPS_GOTSYM", Const, 16}, + {"DT_MIPS_GP_VALUE", Const, 16}, + {"DT_MIPS_HIDDEN_GOTIDX", Const, 16}, + {"DT_MIPS_HIPAGENO", Const, 16}, + {"DT_MIPS_ICHECKSUM", Const, 16}, + {"DT_MIPS_INTERFACE", Const, 16}, + {"DT_MIPS_INTERFACE_SIZE", Const, 16}, + {"DT_MIPS_IVERSION", Const, 16}, + {"DT_MIPS_LIBLIST", Const, 16}, + {"DT_MIPS_LIBLISTNO", Const, 16}, + {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTNO", Const, 16}, + {"DT_MIPS_MSYM", Const, 16}, + {"DT_MIPS_OPTIONS", Const, 16}, + {"DT_MIPS_PERF_SUFFIX", Const, 16}, + {"DT_MIPS_PIXIE_INIT", Const, 16}, + {"DT_MIPS_PLTGOT", Const, 16}, + {"DT_MIPS_PROTECTED_GOTIDX", Const, 16}, + {"DT_MIPS_RLD_MAP", Const, 16}, + {"DT_MIPS_RLD_MAP_REL", Const, 16}, + {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16}, + {"DT_MIPS_RLD_VERSION", Const, 16}, + {"DT_MIPS_RWPLT", Const, 16}, + {"DT_MIPS_SYMBOL_LIB", Const, 16}, + {"DT_MIPS_SYMTABNO", Const, 16}, + {"DT_MIPS_TIME_STAMP", Const, 16}, + {"DT_MIPS_UNREFEXTNO", Const, 16}, + {"DT_MOVEENT", Const, 16}, + {"DT_MOVESZ", Const, 16}, + {"DT_MOVETAB", Const, 16}, + {"DT_NEEDED", Const, 0}, + {"DT_NULL", Const, 0}, + {"DT_PLTGOT", Const, 0}, + {"DT_PLTPAD", Const, 16}, + {"DT_PLTPADSZ", Const, 16}, + {"DT_PLTREL", Const, 0}, + {"DT_PLTRELSZ", Const, 0}, + {"DT_POSFLAG_1", Const, 16}, + {"DT_PPC64_GLINK", Const, 16}, + {"DT_PPC64_OPD", Const, 16}, + {"DT_PPC64_OPDSZ", Const, 16}, + {"DT_PPC64_OPT", Const, 16}, + {"DT_PPC_GOT", Const, 16}, + {"DT_PPC_OPT", Const, 16}, + {"DT_PREINIT_ARRAY", Const, 0}, + {"DT_PREINIT_ARRAYSZ", Const, 0}, + {"DT_REL", Const, 0}, + {"DT_RELA", Const, 0}, + {"DT_RELACOUNT", Const, 16}, + {"DT_RELAENT", Const, 0}, + {"DT_RELASZ", 
Const, 0}, + {"DT_RELCOUNT", Const, 16}, + {"DT_RELENT", Const, 0}, + {"DT_RELSZ", Const, 0}, + {"DT_RPATH", Const, 0}, + {"DT_RUNPATH", Const, 0}, + {"DT_SONAME", Const, 0}, + {"DT_SPARC_REGISTER", Const, 16}, + {"DT_STRSZ", Const, 0}, + {"DT_STRTAB", Const, 0}, + {"DT_SYMBOLIC", Const, 0}, + {"DT_SYMENT", Const, 0}, + {"DT_SYMINENT", Const, 16}, + {"DT_SYMINFO", Const, 16}, + {"DT_SYMINSZ", Const, 16}, + {"DT_SYMTAB", Const, 0}, + {"DT_SYMTAB_SHNDX", Const, 16}, + {"DT_TEXTREL", Const, 0}, + {"DT_TLSDESC_GOT", Const, 16}, + {"DT_TLSDESC_PLT", Const, 16}, + {"DT_USED", Const, 16}, + {"DT_VALRNGHI", Const, 16}, + {"DT_VALRNGLO", Const, 16}, + {"DT_VERDEF", Const, 16}, + {"DT_VERDEFNUM", Const, 16}, + {"DT_VERNEED", Const, 0}, + {"DT_VERNEEDNUM", Const, 0}, + {"DT_VERSYM", Const, 0}, + {"Data", Type, 0}, + {"Dyn32", Type, 0}, + {"Dyn32.Tag", Field, 0}, + {"Dyn32.Val", Field, 0}, + {"Dyn64", Type, 0}, + {"Dyn64.Tag", Field, 0}, + {"Dyn64.Val", Field, 0}, + {"DynFlag", Type, 0}, + {"DynFlag1", Type, 21}, + {"DynTag", Type, 0}, + {"EI_ABIVERSION", Const, 0}, + {"EI_CLASS", Const, 0}, + {"EI_DATA", Const, 0}, + {"EI_NIDENT", Const, 0}, + {"EI_OSABI", Const, 0}, + {"EI_PAD", Const, 0}, + {"EI_VERSION", Const, 0}, + {"ELFCLASS32", Const, 0}, + {"ELFCLASS64", Const, 0}, + {"ELFCLASSNONE", Const, 0}, + {"ELFDATA2LSB", Const, 0}, + {"ELFDATA2MSB", Const, 0}, + {"ELFDATANONE", Const, 0}, + {"ELFMAG", Const, 0}, + {"ELFOSABI_86OPEN", Const, 0}, + {"ELFOSABI_AIX", Const, 0}, + {"ELFOSABI_ARM", Const, 0}, + {"ELFOSABI_AROS", Const, 11}, + {"ELFOSABI_CLOUDABI", Const, 11}, + {"ELFOSABI_FENIXOS", Const, 11}, + {"ELFOSABI_FREEBSD", Const, 0}, + {"ELFOSABI_HPUX", Const, 0}, + {"ELFOSABI_HURD", Const, 0}, + {"ELFOSABI_IRIX", Const, 0}, + {"ELFOSABI_LINUX", Const, 0}, + {"ELFOSABI_MODESTO", Const, 0}, + {"ELFOSABI_NETBSD", Const, 0}, + {"ELFOSABI_NONE", Const, 0}, + {"ELFOSABI_NSK", Const, 0}, + {"ELFOSABI_OPENBSD", Const, 0}, + {"ELFOSABI_OPENVMS", Const, 0}, + {"ELFOSABI_SOLARIS", Const, 0}, + {"ELFOSABI_STANDALONE", Const, 0}, + {"ELFOSABI_TRU64", Const, 0}, + {"EM_386", Const, 0}, + {"EM_486", Const, 0}, + {"EM_56800EX", Const, 11}, + {"EM_68HC05", Const, 11}, + {"EM_68HC08", Const, 11}, + {"EM_68HC11", Const, 11}, + {"EM_68HC12", Const, 0}, + {"EM_68HC16", Const, 11}, + {"EM_68K", Const, 0}, + {"EM_78KOR", Const, 11}, + {"EM_8051", Const, 11}, + {"EM_860", Const, 0}, + {"EM_88K", Const, 0}, + {"EM_960", Const, 0}, + {"EM_AARCH64", Const, 4}, + {"EM_ALPHA", Const, 0}, + {"EM_ALPHA_STD", Const, 0}, + {"EM_ALTERA_NIOS2", Const, 11}, + {"EM_AMDGPU", Const, 11}, + {"EM_ARC", Const, 0}, + {"EM_ARCA", Const, 11}, + {"EM_ARC_COMPACT", Const, 11}, + {"EM_ARC_COMPACT2", Const, 11}, + {"EM_ARM", Const, 0}, + {"EM_AVR", Const, 11}, + {"EM_AVR32", Const, 11}, + {"EM_BA1", Const, 11}, + {"EM_BA2", Const, 11}, + {"EM_BLACKFIN", Const, 11}, + {"EM_BPF", Const, 11}, + {"EM_C166", Const, 11}, + {"EM_CDP", Const, 11}, + {"EM_CE", Const, 11}, + {"EM_CLOUDSHIELD", Const, 11}, + {"EM_COGE", Const, 11}, + {"EM_COLDFIRE", Const, 0}, + {"EM_COOL", Const, 11}, + {"EM_COREA_1ST", Const, 11}, + {"EM_COREA_2ND", Const, 11}, + {"EM_CR", Const, 11}, + {"EM_CR16", Const, 11}, + {"EM_CRAYNV2", Const, 11}, + {"EM_CRIS", Const, 11}, + {"EM_CRX", Const, 11}, + {"EM_CSR_KALIMBA", Const, 11}, + {"EM_CUDA", Const, 11}, + {"EM_CYPRESS_M8C", Const, 11}, + {"EM_D10V", Const, 11}, + {"EM_D30V", Const, 11}, + {"EM_DSP24", Const, 11}, + {"EM_DSPIC30F", Const, 11}, + {"EM_DXP", Const, 11}, + {"EM_ECOG1", Const, 11}, + {"EM_ECOG16", Const, 11}, + 
{"EM_ECOG1X", Const, 11}, + {"EM_ECOG2", Const, 11}, + {"EM_ETPU", Const, 11}, + {"EM_EXCESS", Const, 11}, + {"EM_F2MC16", Const, 11}, + {"EM_FIREPATH", Const, 11}, + {"EM_FR20", Const, 0}, + {"EM_FR30", Const, 11}, + {"EM_FT32", Const, 11}, + {"EM_FX66", Const, 11}, + {"EM_H8S", Const, 0}, + {"EM_H8_300", Const, 0}, + {"EM_H8_300H", Const, 0}, + {"EM_H8_500", Const, 0}, + {"EM_HUANY", Const, 11}, + {"EM_IA_64", Const, 0}, + {"EM_INTEL205", Const, 11}, + {"EM_INTEL206", Const, 11}, + {"EM_INTEL207", Const, 11}, + {"EM_INTEL208", Const, 11}, + {"EM_INTEL209", Const, 11}, + {"EM_IP2K", Const, 11}, + {"EM_JAVELIN", Const, 11}, + {"EM_K10M", Const, 11}, + {"EM_KM32", Const, 11}, + {"EM_KMX16", Const, 11}, + {"EM_KMX32", Const, 11}, + {"EM_KMX8", Const, 11}, + {"EM_KVARC", Const, 11}, + {"EM_L10M", Const, 11}, + {"EM_LANAI", Const, 11}, + {"EM_LATTICEMICO32", Const, 11}, + {"EM_LOONGARCH", Const, 19}, + {"EM_M16C", Const, 11}, + {"EM_M32", Const, 0}, + {"EM_M32C", Const, 11}, + {"EM_M32R", Const, 11}, + {"EM_MANIK", Const, 11}, + {"EM_MAX", Const, 11}, + {"EM_MAXQ30", Const, 11}, + {"EM_MCHP_PIC", Const, 11}, + {"EM_MCST_ELBRUS", Const, 11}, + {"EM_ME16", Const, 0}, + {"EM_METAG", Const, 11}, + {"EM_MICROBLAZE", Const, 11}, + {"EM_MIPS", Const, 0}, + {"EM_MIPS_RS3_LE", Const, 0}, + {"EM_MIPS_RS4_BE", Const, 0}, + {"EM_MIPS_X", Const, 0}, + {"EM_MMA", Const, 0}, + {"EM_MMDSP_PLUS", Const, 11}, + {"EM_MMIX", Const, 11}, + {"EM_MN10200", Const, 11}, + {"EM_MN10300", Const, 11}, + {"EM_MOXIE", Const, 11}, + {"EM_MSP430", Const, 11}, + {"EM_NCPU", Const, 0}, + {"EM_NDR1", Const, 0}, + {"EM_NDS32", Const, 11}, + {"EM_NONE", Const, 0}, + {"EM_NORC", Const, 11}, + {"EM_NS32K", Const, 11}, + {"EM_OPEN8", Const, 11}, + {"EM_OPENRISC", Const, 11}, + {"EM_PARISC", Const, 0}, + {"EM_PCP", Const, 0}, + {"EM_PDP10", Const, 11}, + {"EM_PDP11", Const, 11}, + {"EM_PDSP", Const, 11}, + {"EM_PJ", Const, 11}, + {"EM_PPC", Const, 0}, + {"EM_PPC64", Const, 0}, + {"EM_PRISM", Const, 11}, + {"EM_QDSP6", Const, 11}, + {"EM_R32C", Const, 11}, + {"EM_RCE", Const, 0}, + {"EM_RH32", Const, 0}, + {"EM_RISCV", Const, 11}, + {"EM_RL78", Const, 11}, + {"EM_RS08", Const, 11}, + {"EM_RX", Const, 11}, + {"EM_S370", Const, 0}, + {"EM_S390", Const, 0}, + {"EM_SCORE7", Const, 11}, + {"EM_SEP", Const, 11}, + {"EM_SE_C17", Const, 11}, + {"EM_SE_C33", Const, 11}, + {"EM_SH", Const, 0}, + {"EM_SHARC", Const, 11}, + {"EM_SLE9X", Const, 11}, + {"EM_SNP1K", Const, 11}, + {"EM_SPARC", Const, 0}, + {"EM_SPARC32PLUS", Const, 0}, + {"EM_SPARCV9", Const, 0}, + {"EM_ST100", Const, 0}, + {"EM_ST19", Const, 11}, + {"EM_ST200", Const, 11}, + {"EM_ST7", Const, 11}, + {"EM_ST9PLUS", Const, 11}, + {"EM_STARCORE", Const, 0}, + {"EM_STM8", Const, 11}, + {"EM_STXP7X", Const, 11}, + {"EM_SVX", Const, 11}, + {"EM_TILE64", Const, 11}, + {"EM_TILEGX", Const, 11}, + {"EM_TILEPRO", Const, 11}, + {"EM_TINYJ", Const, 0}, + {"EM_TI_ARP32", Const, 11}, + {"EM_TI_C2000", Const, 11}, + {"EM_TI_C5500", Const, 11}, + {"EM_TI_C6000", Const, 11}, + {"EM_TI_PRU", Const, 11}, + {"EM_TMM_GPP", Const, 11}, + {"EM_TPC", Const, 11}, + {"EM_TRICORE", Const, 0}, + {"EM_TRIMEDIA", Const, 11}, + {"EM_TSK3000", Const, 11}, + {"EM_UNICORE", Const, 11}, + {"EM_V800", Const, 0}, + {"EM_V850", Const, 11}, + {"EM_VAX", Const, 11}, + {"EM_VIDEOCORE", Const, 11}, + {"EM_VIDEOCORE3", Const, 11}, + {"EM_VIDEOCORE5", Const, 11}, + {"EM_VISIUM", Const, 11}, + {"EM_VPP500", Const, 0}, + {"EM_X86_64", Const, 0}, + {"EM_XCORE", Const, 11}, + {"EM_XGATE", Const, 11}, + {"EM_XIMO16", Const, 11}, + 
{"EM_XTENSA", Const, 11}, + {"EM_Z80", Const, 11}, + {"EM_ZSP", Const, 11}, + {"ET_CORE", Const, 0}, + {"ET_DYN", Const, 0}, + {"ET_EXEC", Const, 0}, + {"ET_HIOS", Const, 0}, + {"ET_HIPROC", Const, 0}, + {"ET_LOOS", Const, 0}, + {"ET_LOPROC", Const, 0}, + {"ET_NONE", Const, 0}, + {"ET_REL", Const, 0}, + {"EV_CURRENT", Const, 0}, + {"EV_NONE", Const, 0}, + {"ErrNoSymbols", Var, 4}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"File.Progs", Field, 0}, + {"File.Sections", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.ABIVersion", Field, 0}, + {"FileHeader.ByteOrder", Field, 0}, + {"FileHeader.Class", Field, 0}, + {"FileHeader.Data", Field, 0}, + {"FileHeader.Entry", Field, 1}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.OSABI", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FileHeader.Version", Field, 0}, + {"FormatError", Type, 0}, + {"Header32", Type, 0}, + {"Header32.Ehsize", Field, 0}, + {"Header32.Entry", Field, 0}, + {"Header32.Flags", Field, 0}, + {"Header32.Ident", Field, 0}, + {"Header32.Machine", Field, 0}, + {"Header32.Phentsize", Field, 0}, + {"Header32.Phnum", Field, 0}, + {"Header32.Phoff", Field, 0}, + {"Header32.Shentsize", Field, 0}, + {"Header32.Shnum", Field, 0}, + {"Header32.Shoff", Field, 0}, + {"Header32.Shstrndx", Field, 0}, + {"Header32.Type", Field, 0}, + {"Header32.Version", Field, 0}, + {"Header64", Type, 0}, + {"Header64.Ehsize", Field, 0}, + {"Header64.Entry", Field, 0}, + {"Header64.Flags", Field, 0}, + {"Header64.Ident", Field, 0}, + {"Header64.Machine", Field, 0}, + {"Header64.Phentsize", Field, 0}, + {"Header64.Phnum", Field, 0}, + {"Header64.Phoff", Field, 0}, + {"Header64.Shentsize", Field, 0}, + {"Header64.Shnum", Field, 0}, + {"Header64.Shoff", Field, 0}, + {"Header64.Shstrndx", Field, 0}, + {"Header64.Type", Field, 0}, + {"Header64.Version", Field, 0}, + {"ImportedSymbol", Type, 0}, + {"ImportedSymbol.Library", Field, 0}, + {"ImportedSymbol.Name", Field, 0}, + {"ImportedSymbol.Version", Field, 0}, + {"Machine", Type, 0}, + {"NT_FPREGSET", Const, 0}, + {"NT_PRPSINFO", Const, 0}, + {"NT_PRSTATUS", Const, 0}, + {"NType", Type, 0}, + {"NewFile", Func, 0}, + {"OSABI", Type, 0}, + {"Open", Func, 0}, + {"PF_MASKOS", Const, 0}, + {"PF_MASKPROC", Const, 0}, + {"PF_R", Const, 0}, + {"PF_W", Const, 0}, + {"PF_X", Const, 0}, + {"PT_AARCH64_ARCHEXT", Const, 16}, + {"PT_AARCH64_UNWIND", Const, 16}, + {"PT_ARM_ARCHEXT", Const, 16}, + {"PT_ARM_EXIDX", Const, 16}, + {"PT_DYNAMIC", Const, 0}, + {"PT_GNU_EH_FRAME", Const, 16}, + {"PT_GNU_MBIND_HI", Const, 16}, + {"PT_GNU_MBIND_LO", Const, 16}, + {"PT_GNU_PROPERTY", Const, 16}, + {"PT_GNU_RELRO", Const, 16}, + {"PT_GNU_STACK", Const, 16}, + {"PT_HIOS", Const, 0}, + {"PT_HIPROC", Const, 0}, + {"PT_INTERP", Const, 0}, + {"PT_LOAD", Const, 0}, + {"PT_LOOS", Const, 0}, + {"PT_LOPROC", Const, 0}, + {"PT_MIPS_ABIFLAGS", Const, 16}, + {"PT_MIPS_OPTIONS", Const, 16}, + {"PT_MIPS_REGINFO", Const, 16}, + {"PT_MIPS_RTPROC", Const, 16}, + {"PT_NOTE", Const, 0}, + {"PT_NULL", Const, 0}, + {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_RANDOMIZE", Const, 16}, + {"PT_OPENBSD_WXNEEDED", Const, 16}, + {"PT_PAX_FLAGS", Const, 16}, + {"PT_PHDR", Const, 0}, + {"PT_S390_PGSTE", Const, 16}, + {"PT_SHLIB", Const, 0}, + {"PT_SUNWSTACK", Const, 16}, + {"PT_SUNW_EH_FRAME", Const, 16}, + {"PT_TLS", Const, 0}, + {"Prog", Type, 0}, + {"Prog.ProgHeader", Field, 0}, + {"Prog.ReaderAt", Field, 0}, + {"Prog32", Type, 0}, + {"Prog32.Align", Field, 0}, + {"Prog32.Filesz", Field, 0}, + {"Prog32.Flags", Field, 0}, + 
{"Prog32.Memsz", Field, 0}, + {"Prog32.Off", Field, 0}, + {"Prog32.Paddr", Field, 0}, + {"Prog32.Type", Field, 0}, + {"Prog32.Vaddr", Field, 0}, + {"Prog64", Type, 0}, + {"Prog64.Align", Field, 0}, + {"Prog64.Filesz", Field, 0}, + {"Prog64.Flags", Field, 0}, + {"Prog64.Memsz", Field, 0}, + {"Prog64.Off", Field, 0}, + {"Prog64.Paddr", Field, 0}, + {"Prog64.Type", Field, 0}, + {"Prog64.Vaddr", Field, 0}, + {"ProgFlag", Type, 0}, + {"ProgHeader", Type, 0}, + {"ProgHeader.Align", Field, 0}, + {"ProgHeader.Filesz", Field, 0}, + {"ProgHeader.Flags", Field, 0}, + {"ProgHeader.Memsz", Field, 0}, + {"ProgHeader.Off", Field, 0}, + {"ProgHeader.Paddr", Field, 0}, + {"ProgHeader.Type", Field, 0}, + {"ProgHeader.Vaddr", Field, 0}, + {"ProgType", Type, 0}, + {"R_386", Type, 0}, + {"R_386_16", Const, 10}, + {"R_386_32", Const, 0}, + {"R_386_32PLT", Const, 10}, + {"R_386_8", Const, 10}, + {"R_386_COPY", Const, 0}, + {"R_386_GLOB_DAT", Const, 0}, + {"R_386_GOT32", Const, 0}, + {"R_386_GOT32X", Const, 10}, + {"R_386_GOTOFF", Const, 0}, + {"R_386_GOTPC", Const, 0}, + {"R_386_IRELATIVE", Const, 10}, + {"R_386_JMP_SLOT", Const, 0}, + {"R_386_NONE", Const, 0}, + {"R_386_PC16", Const, 10}, + {"R_386_PC32", Const, 0}, + {"R_386_PC8", Const, 10}, + {"R_386_PLT32", Const, 0}, + {"R_386_RELATIVE", Const, 0}, + {"R_386_SIZE32", Const, 10}, + {"R_386_TLS_DESC", Const, 10}, + {"R_386_TLS_DESC_CALL", Const, 10}, + {"R_386_TLS_DTPMOD32", Const, 0}, + {"R_386_TLS_DTPOFF32", Const, 0}, + {"R_386_TLS_GD", Const, 0}, + {"R_386_TLS_GD_32", Const, 0}, + {"R_386_TLS_GD_CALL", Const, 0}, + {"R_386_TLS_GD_POP", Const, 0}, + {"R_386_TLS_GD_PUSH", Const, 0}, + {"R_386_TLS_GOTDESC", Const, 10}, + {"R_386_TLS_GOTIE", Const, 0}, + {"R_386_TLS_IE", Const, 0}, + {"R_386_TLS_IE_32", Const, 0}, + {"R_386_TLS_LDM", Const, 0}, + {"R_386_TLS_LDM_32", Const, 0}, + {"R_386_TLS_LDM_CALL", Const, 0}, + {"R_386_TLS_LDM_POP", Const, 0}, + {"R_386_TLS_LDM_PUSH", Const, 0}, + {"R_386_TLS_LDO_32", Const, 0}, + {"R_386_TLS_LE", Const, 0}, + {"R_386_TLS_LE_32", Const, 0}, + {"R_386_TLS_TPOFF", Const, 0}, + {"R_386_TLS_TPOFF32", Const, 0}, + {"R_390", Type, 7}, + {"R_390_12", Const, 7}, + {"R_390_16", Const, 7}, + {"R_390_20", Const, 7}, + {"R_390_32", Const, 7}, + {"R_390_64", Const, 7}, + {"R_390_8", Const, 7}, + {"R_390_COPY", Const, 7}, + {"R_390_GLOB_DAT", Const, 7}, + {"R_390_GOT12", Const, 7}, + {"R_390_GOT16", Const, 7}, + {"R_390_GOT20", Const, 7}, + {"R_390_GOT32", Const, 7}, + {"R_390_GOT64", Const, 7}, + {"R_390_GOTENT", Const, 7}, + {"R_390_GOTOFF", Const, 7}, + {"R_390_GOTOFF16", Const, 7}, + {"R_390_GOTOFF64", Const, 7}, + {"R_390_GOTPC", Const, 7}, + {"R_390_GOTPCDBL", Const, 7}, + {"R_390_GOTPLT12", Const, 7}, + {"R_390_GOTPLT16", Const, 7}, + {"R_390_GOTPLT20", Const, 7}, + {"R_390_GOTPLT32", Const, 7}, + {"R_390_GOTPLT64", Const, 7}, + {"R_390_GOTPLTENT", Const, 7}, + {"R_390_GOTPLTOFF16", Const, 7}, + {"R_390_GOTPLTOFF32", Const, 7}, + {"R_390_GOTPLTOFF64", Const, 7}, + {"R_390_JMP_SLOT", Const, 7}, + {"R_390_NONE", Const, 7}, + {"R_390_PC16", Const, 7}, + {"R_390_PC16DBL", Const, 7}, + {"R_390_PC32", Const, 7}, + {"R_390_PC32DBL", Const, 7}, + {"R_390_PC64", Const, 7}, + {"R_390_PLT16DBL", Const, 7}, + {"R_390_PLT32", Const, 7}, + {"R_390_PLT32DBL", Const, 7}, + {"R_390_PLT64", Const, 7}, + {"R_390_RELATIVE", Const, 7}, + {"R_390_TLS_DTPMOD", Const, 7}, + {"R_390_TLS_DTPOFF", Const, 7}, + {"R_390_TLS_GD32", Const, 7}, + {"R_390_TLS_GD64", Const, 7}, + {"R_390_TLS_GDCALL", Const, 7}, + {"R_390_TLS_GOTIE12", Const, 7}, + 
{"R_390_TLS_GOTIE20", Const, 7}, + {"R_390_TLS_GOTIE32", Const, 7}, + {"R_390_TLS_GOTIE64", Const, 7}, + {"R_390_TLS_IE32", Const, 7}, + {"R_390_TLS_IE64", Const, 7}, + {"R_390_TLS_IEENT", Const, 7}, + {"R_390_TLS_LDCALL", Const, 7}, + {"R_390_TLS_LDM32", Const, 7}, + {"R_390_TLS_LDM64", Const, 7}, + {"R_390_TLS_LDO32", Const, 7}, + {"R_390_TLS_LDO64", Const, 7}, + {"R_390_TLS_LE32", Const, 7}, + {"R_390_TLS_LE64", Const, 7}, + {"R_390_TLS_LOAD", Const, 7}, + {"R_390_TLS_TPOFF", Const, 7}, + {"R_AARCH64", Type, 4}, + {"R_AARCH64_ABS16", Const, 4}, + {"R_AARCH64_ABS32", Const, 4}, + {"R_AARCH64_ABS64", Const, 4}, + {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4}, + {"R_AARCH64_CALL26", Const, 4}, + {"R_AARCH64_CONDBR19", Const, 4}, + {"R_AARCH64_COPY", Const, 4}, + {"R_AARCH64_GLOB_DAT", Const, 4}, + {"R_AARCH64_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_IRELATIVE", Const, 4}, + {"R_AARCH64_JUMP26", Const, 4}, + {"R_AARCH64_JUMP_SLOT", Const, 4}, + {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10}, + {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10}, + {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_MOVW_SABS_G1", Const, 4}, + {"R_AARCH64_MOVW_SABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G3", Const, 4}, + {"R_AARCH64_NONE", Const, 4}, + {"R_AARCH64_NULL", Const, 4}, + {"R_AARCH64_P32_ABS16", Const, 4}, + {"R_AARCH64_P32_ABS32", Const, 4}, + {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_P32_CALL26", Const, 4}, + {"R_AARCH64_P32_CONDBR19", Const, 4}, + {"R_AARCH64_P32_COPY", Const, 4}, + {"R_AARCH64_P32_GLOB_DAT", Const, 4}, + {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_IRELATIVE", Const, 4}, + {"R_AARCH64_P32_JUMP26", Const, 4}, + {"R_AARCH64_P32_JUMP_SLOT", Const, 4}, + {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_P32_PREL16", Const, 4}, + {"R_AARCH64_P32_PREL32", Const, 4}, + {"R_AARCH64_P32_RELATIVE", Const, 4}, + {"R_AARCH64_P32_TLSDESC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4}, + 
{"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_P32_TLS_DTPMOD", Const, 4}, + {"R_AARCH64_P32_TLS_DTPREL", Const, 4}, + {"R_AARCH64_P32_TLS_TPREL", Const, 4}, + {"R_AARCH64_P32_TSTBR14", Const, 4}, + {"R_AARCH64_PREL16", Const, 4}, + {"R_AARCH64_PREL32", Const, 4}, + {"R_AARCH64_PREL64", Const, 4}, + {"R_AARCH64_RELATIVE", Const, 4}, + {"R_AARCH64_TLSDESC", Const, 4}, + {"R_AARCH64_TLSDESC_ADD", Const, 4}, + {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_LDR", Const, 4}, + {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G1", Const, 4}, + {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10}, + {"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10}, + {"R_AARCH64_TLSGD_MOVW_G1", Const, 10}, + {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4}, + {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10}, + {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4}, + {"R_AARCH64_TLS_DTPMOD64", Const, 4}, + {"R_AARCH64_TLS_DTPREL64", Const, 4}, + {"R_AARCH64_TLS_TPREL64", Const, 4}, + {"R_AARCH64_TSTBR14", Const, 4}, + {"R_ALPHA", Type, 0}, + {"R_ALPHA_BRADDR", Const, 0}, + {"R_ALPHA_COPY", Const, 0}, + {"R_ALPHA_GLOB_DAT", Const, 0}, + {"R_ALPHA_GPDISP", Const, 0}, + {"R_ALPHA_GPREL32", Const, 0}, + {"R_ALPHA_GPRELHIGH", Const, 0}, + {"R_ALPHA_GPRELLOW", Const, 0}, + {"R_ALPHA_GPVALUE", Const, 0}, + {"R_ALPHA_HINT", Const, 0}, + {"R_ALPHA_IMMED_BR_HI32", Const, 0}, + {"R_ALPHA_IMMED_GP_16", Const, 0}, + {"R_ALPHA_IMMED_GP_HI32", Const, 0}, + {"R_ALPHA_IMMED_LO32", Const, 0}, + {"R_ALPHA_IMMED_SCN_HI32", Const, 0}, + {"R_ALPHA_JMP_SLOT", Const, 0}, + {"R_ALPHA_LITERAL", Const, 0}, + {"R_ALPHA_LITUSE", Const, 0}, + {"R_ALPHA_NONE", Const, 0}, + {"R_ALPHA_OP_PRSHIFT", Const, 0}, + {"R_ALPHA_OP_PSUB", Const, 0}, + {"R_ALPHA_OP_PUSH", Const, 0}, + {"R_ALPHA_OP_STORE", Const, 0}, + {"R_ALPHA_REFLONG", Const, 0}, + {"R_ALPHA_REFQUAD", Const, 0}, + {"R_ALPHA_RELATIVE", 
Const, 0}, + {"R_ALPHA_SREL16", Const, 0}, + {"R_ALPHA_SREL32", Const, 0}, + {"R_ALPHA_SREL64", Const, 0}, + {"R_ARM", Type, 0}, + {"R_ARM_ABS12", Const, 0}, + {"R_ARM_ABS16", Const, 0}, + {"R_ARM_ABS32", Const, 0}, + {"R_ARM_ABS32_NOI", Const, 10}, + {"R_ARM_ABS8", Const, 0}, + {"R_ARM_ALU_PCREL_15_8", Const, 10}, + {"R_ARM_ALU_PCREL_23_15", Const, 10}, + {"R_ARM_ALU_PCREL_7_0", Const, 10}, + {"R_ARM_ALU_PC_G0", Const, 10}, + {"R_ARM_ALU_PC_G0_NC", Const, 10}, + {"R_ARM_ALU_PC_G1", Const, 10}, + {"R_ARM_ALU_PC_G1_NC", Const, 10}, + {"R_ARM_ALU_PC_G2", Const, 10}, + {"R_ARM_ALU_SBREL_19_12_NC", Const, 10}, + {"R_ARM_ALU_SBREL_27_20_CK", Const, 10}, + {"R_ARM_ALU_SB_G0", Const, 10}, + {"R_ARM_ALU_SB_G0_NC", Const, 10}, + {"R_ARM_ALU_SB_G1", Const, 10}, + {"R_ARM_ALU_SB_G1_NC", Const, 10}, + {"R_ARM_ALU_SB_G2", Const, 10}, + {"R_ARM_AMP_VCALL9", Const, 0}, + {"R_ARM_BASE_ABS", Const, 10}, + {"R_ARM_CALL", Const, 10}, + {"R_ARM_COPY", Const, 0}, + {"R_ARM_GLOB_DAT", Const, 0}, + {"R_ARM_GNU_VTENTRY", Const, 0}, + {"R_ARM_GNU_VTINHERIT", Const, 0}, + {"R_ARM_GOT32", Const, 0}, + {"R_ARM_GOTOFF", Const, 0}, + {"R_ARM_GOTOFF12", Const, 10}, + {"R_ARM_GOTPC", Const, 0}, + {"R_ARM_GOTRELAX", Const, 10}, + {"R_ARM_GOT_ABS", Const, 10}, + {"R_ARM_GOT_BREL12", Const, 10}, + {"R_ARM_GOT_PREL", Const, 10}, + {"R_ARM_IRELATIVE", Const, 10}, + {"R_ARM_JUMP24", Const, 10}, + {"R_ARM_JUMP_SLOT", Const, 0}, + {"R_ARM_LDC_PC_G0", Const, 10}, + {"R_ARM_LDC_PC_G1", Const, 10}, + {"R_ARM_LDC_PC_G2", Const, 10}, + {"R_ARM_LDC_SB_G0", Const, 10}, + {"R_ARM_LDC_SB_G1", Const, 10}, + {"R_ARM_LDC_SB_G2", Const, 10}, + {"R_ARM_LDRS_PC_G0", Const, 10}, + {"R_ARM_LDRS_PC_G1", Const, 10}, + {"R_ARM_LDRS_PC_G2", Const, 10}, + {"R_ARM_LDRS_SB_G0", Const, 10}, + {"R_ARM_LDRS_SB_G1", Const, 10}, + {"R_ARM_LDRS_SB_G2", Const, 10}, + {"R_ARM_LDR_PC_G1", Const, 10}, + {"R_ARM_LDR_PC_G2", Const, 10}, + {"R_ARM_LDR_SBREL_11_10_NC", Const, 10}, + {"R_ARM_LDR_SB_G0", Const, 10}, + {"R_ARM_LDR_SB_G1", Const, 10}, + {"R_ARM_LDR_SB_G2", Const, 10}, + {"R_ARM_ME_TOO", Const, 10}, + {"R_ARM_MOVT_ABS", Const, 10}, + {"R_ARM_MOVT_BREL", Const, 10}, + {"R_ARM_MOVT_PREL", Const, 10}, + {"R_ARM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_MOVW_BREL", Const, 10}, + {"R_ARM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_NONE", Const, 0}, + {"R_ARM_PC13", Const, 0}, + {"R_ARM_PC24", Const, 0}, + {"R_ARM_PLT32", Const, 0}, + {"R_ARM_PLT32_ABS", Const, 10}, + {"R_ARM_PREL31", Const, 10}, + {"R_ARM_PRIVATE_0", Const, 10}, + {"R_ARM_PRIVATE_1", Const, 10}, + {"R_ARM_PRIVATE_10", Const, 10}, + {"R_ARM_PRIVATE_11", Const, 10}, + {"R_ARM_PRIVATE_12", Const, 10}, + {"R_ARM_PRIVATE_13", Const, 10}, + {"R_ARM_PRIVATE_14", Const, 10}, + {"R_ARM_PRIVATE_15", Const, 10}, + {"R_ARM_PRIVATE_2", Const, 10}, + {"R_ARM_PRIVATE_3", Const, 10}, + {"R_ARM_PRIVATE_4", Const, 10}, + {"R_ARM_PRIVATE_5", Const, 10}, + {"R_ARM_PRIVATE_6", Const, 10}, + {"R_ARM_PRIVATE_7", Const, 10}, + {"R_ARM_PRIVATE_8", Const, 10}, + {"R_ARM_PRIVATE_9", Const, 10}, + {"R_ARM_RABS32", Const, 0}, + {"R_ARM_RBASE", Const, 0}, + {"R_ARM_REL32", Const, 0}, + {"R_ARM_REL32_NOI", Const, 10}, + {"R_ARM_RELATIVE", Const, 0}, + {"R_ARM_RPC24", Const, 0}, + {"R_ARM_RREL32", Const, 0}, + {"R_ARM_RSBREL32", Const, 0}, + {"R_ARM_RXPC25", Const, 10}, + {"R_ARM_SBREL31", Const, 10}, + {"R_ARM_SBREL32", Const, 0}, + {"R_ARM_SWI24", Const, 0}, + {"R_ARM_TARGET1", Const, 10}, + {"R_ARM_TARGET2", Const, 10}, + {"R_ARM_THM_ABS5", Const, 0}, + {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10}, + 
{"R_ARM_THM_ALU_ABS_G1_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G3", Const, 10}, + {"R_ARM_THM_ALU_PREL_11_0", Const, 10}, + {"R_ARM_THM_GOT_BREL12", Const, 10}, + {"R_ARM_THM_JUMP11", Const, 10}, + {"R_ARM_THM_JUMP19", Const, 10}, + {"R_ARM_THM_JUMP24", Const, 10}, + {"R_ARM_THM_JUMP6", Const, 10}, + {"R_ARM_THM_JUMP8", Const, 10}, + {"R_ARM_THM_MOVT_ABS", Const, 10}, + {"R_ARM_THM_MOVT_BREL", Const, 10}, + {"R_ARM_THM_MOVT_PREL", Const, 10}, + {"R_ARM_THM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_THM_MOVW_BREL", Const, 10}, + {"R_ARM_THM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_THM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_THM_PC12", Const, 10}, + {"R_ARM_THM_PC22", Const, 0}, + {"R_ARM_THM_PC8", Const, 0}, + {"R_ARM_THM_RPC22", Const, 0}, + {"R_ARM_THM_SWI8", Const, 0}, + {"R_ARM_THM_TLS_CALL", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ16", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ32", Const, 10}, + {"R_ARM_THM_XPC22", Const, 0}, + {"R_ARM_TLS_CALL", Const, 10}, + {"R_ARM_TLS_DESCSEQ", Const, 10}, + {"R_ARM_TLS_DTPMOD32", Const, 10}, + {"R_ARM_TLS_DTPOFF32", Const, 10}, + {"R_ARM_TLS_GD32", Const, 10}, + {"R_ARM_TLS_GOTDESC", Const, 10}, + {"R_ARM_TLS_IE12GP", Const, 10}, + {"R_ARM_TLS_IE32", Const, 10}, + {"R_ARM_TLS_LDM32", Const, 10}, + {"R_ARM_TLS_LDO12", Const, 10}, + {"R_ARM_TLS_LDO32", Const, 10}, + {"R_ARM_TLS_LE12", Const, 10}, + {"R_ARM_TLS_LE32", Const, 10}, + {"R_ARM_TLS_TPOFF32", Const, 10}, + {"R_ARM_V4BX", Const, 10}, + {"R_ARM_XPC25", Const, 0}, + {"R_INFO", Func, 0}, + {"R_INFO32", Func, 0}, + {"R_LARCH", Type, 19}, + {"R_LARCH_32", Const, 19}, + {"R_LARCH_32_PCREL", Const, 20}, + {"R_LARCH_64", Const, 19}, + {"R_LARCH_64_PCREL", Const, 22}, + {"R_LARCH_ABS64_HI12", Const, 20}, + {"R_LARCH_ABS64_LO20", Const, 20}, + {"R_LARCH_ABS_HI20", Const, 20}, + {"R_LARCH_ABS_LO12", Const, 20}, + {"R_LARCH_ADD16", Const, 19}, + {"R_LARCH_ADD24", Const, 19}, + {"R_LARCH_ADD32", Const, 19}, + {"R_LARCH_ADD6", Const, 22}, + {"R_LARCH_ADD64", Const, 19}, + {"R_LARCH_ADD8", Const, 19}, + {"R_LARCH_ADD_ULEB128", Const, 22}, + {"R_LARCH_ALIGN", Const, 22}, + {"R_LARCH_B16", Const, 20}, + {"R_LARCH_B21", Const, 20}, + {"R_LARCH_B26", Const, 20}, + {"R_LARCH_CFA", Const, 22}, + {"R_LARCH_COPY", Const, 19}, + {"R_LARCH_DELETE", Const, 22}, + {"R_LARCH_GNU_VTENTRY", Const, 20}, + {"R_LARCH_GNU_VTINHERIT", Const, 20}, + {"R_LARCH_GOT64_HI12", Const, 20}, + {"R_LARCH_GOT64_LO20", Const, 20}, + {"R_LARCH_GOT64_PC_HI12", Const, 20}, + {"R_LARCH_GOT64_PC_LO20", Const, 20}, + {"R_LARCH_GOT_HI20", Const, 20}, + {"R_LARCH_GOT_LO12", Const, 20}, + {"R_LARCH_GOT_PC_HI20", Const, 20}, + {"R_LARCH_GOT_PC_LO12", Const, 20}, + {"R_LARCH_IRELATIVE", Const, 19}, + {"R_LARCH_JUMP_SLOT", Const, 19}, + {"R_LARCH_MARK_LA", Const, 19}, + {"R_LARCH_MARK_PCREL", Const, 19}, + {"R_LARCH_NONE", Const, 19}, + {"R_LARCH_PCALA64_HI12", Const, 20}, + {"R_LARCH_PCALA64_LO20", Const, 20}, + {"R_LARCH_PCALA_HI20", Const, 20}, + {"R_LARCH_PCALA_LO12", Const, 20}, + {"R_LARCH_PCREL20_S2", Const, 22}, + {"R_LARCH_RELATIVE", Const, 19}, + {"R_LARCH_RELAX", Const, 20}, + {"R_LARCH_SOP_ADD", Const, 19}, + {"R_LARCH_SOP_AND", Const, 19}, + {"R_LARCH_SOP_ASSERT", Const, 19}, + {"R_LARCH_SOP_IF_ELSE", Const, 19}, + {"R_LARCH_SOP_NOT", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_12", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_5", 
Const, 19}, + {"R_LARCH_SOP_POP_32_S_5_20", Const, 19}, + {"R_LARCH_SOP_POP_32_U", Const, 19}, + {"R_LARCH_SOP_POP_32_U_10_12", Const, 19}, + {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19}, + {"R_LARCH_SOP_PUSH_DUP", Const, 19}, + {"R_LARCH_SOP_PUSH_GPREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19}, + {"R_LARCH_SOP_SL", Const, 19}, + {"R_LARCH_SOP_SR", Const, 19}, + {"R_LARCH_SOP_SUB", Const, 19}, + {"R_LARCH_SUB16", Const, 19}, + {"R_LARCH_SUB24", Const, 19}, + {"R_LARCH_SUB32", Const, 19}, + {"R_LARCH_SUB6", Const, 22}, + {"R_LARCH_SUB64", Const, 19}, + {"R_LARCH_SUB8", Const, 19}, + {"R_LARCH_SUB_ULEB128", Const, 22}, + {"R_LARCH_TLS_DTPMOD32", Const, 19}, + {"R_LARCH_TLS_DTPMOD64", Const, 19}, + {"R_LARCH_TLS_DTPREL32", Const, 19}, + {"R_LARCH_TLS_DTPREL64", Const, 19}, + {"R_LARCH_TLS_GD_HI20", Const, 20}, + {"R_LARCH_TLS_GD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE64_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_LO20", Const, 20}, + {"R_LARCH_TLS_IE64_PC_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_PC_LO20", Const, 20}, + {"R_LARCH_TLS_IE_HI20", Const, 20}, + {"R_LARCH_TLS_IE_LO12", Const, 20}, + {"R_LARCH_TLS_IE_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE_PC_LO12", Const, 20}, + {"R_LARCH_TLS_LD_HI20", Const, 20}, + {"R_LARCH_TLS_LD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_LE64_HI12", Const, 20}, + {"R_LARCH_TLS_LE64_LO20", Const, 20}, + {"R_LARCH_TLS_LE_HI20", Const, 20}, + {"R_LARCH_TLS_LE_LO12", Const, 20}, + {"R_LARCH_TLS_TPREL32", Const, 19}, + {"R_LARCH_TLS_TPREL64", Const, 19}, + {"R_MIPS", Type, 6}, + {"R_MIPS_16", Const, 6}, + {"R_MIPS_26", Const, 6}, + {"R_MIPS_32", Const, 6}, + {"R_MIPS_64", Const, 6}, + {"R_MIPS_ADD_IMMEDIATE", Const, 6}, + {"R_MIPS_CALL16", Const, 6}, + {"R_MIPS_CALL_HI16", Const, 6}, + {"R_MIPS_CALL_LO16", Const, 6}, + {"R_MIPS_DELETE", Const, 6}, + {"R_MIPS_GOT16", Const, 6}, + {"R_MIPS_GOT_DISP", Const, 6}, + {"R_MIPS_GOT_HI16", Const, 6}, + {"R_MIPS_GOT_LO16", Const, 6}, + {"R_MIPS_GOT_OFST", Const, 6}, + {"R_MIPS_GOT_PAGE", Const, 6}, + {"R_MIPS_GPREL16", Const, 6}, + {"R_MIPS_GPREL32", Const, 6}, + {"R_MIPS_HI16", Const, 6}, + {"R_MIPS_HIGHER", Const, 6}, + {"R_MIPS_HIGHEST", Const, 6}, + {"R_MIPS_INSERT_A", Const, 6}, + {"R_MIPS_INSERT_B", Const, 6}, + {"R_MIPS_JALR", Const, 6}, + {"R_MIPS_LITERAL", Const, 6}, + {"R_MIPS_LO16", Const, 6}, + {"R_MIPS_NONE", Const, 6}, + {"R_MIPS_PC16", Const, 6}, + {"R_MIPS_PC32", Const, 22}, + {"R_MIPS_PJUMP", Const, 6}, + {"R_MIPS_REL16", Const, 6}, + {"R_MIPS_REL32", Const, 6}, + {"R_MIPS_RELGOT", Const, 6}, + {"R_MIPS_SCN_DISP", Const, 6}, + {"R_MIPS_SHIFT5", Const, 6}, + {"R_MIPS_SHIFT6", Const, 6}, + {"R_MIPS_SUB", Const, 6}, + {"R_MIPS_TLS_DTPMOD32", Const, 6}, + {"R_MIPS_TLS_DTPMOD64", Const, 6}, + {"R_MIPS_TLS_DTPREL32", Const, 6}, + {"R_MIPS_TLS_DTPREL64", Const, 6}, + {"R_MIPS_TLS_DTPREL_HI16", Const, 6}, + {"R_MIPS_TLS_DTPREL_LO16", Const, 6}, + {"R_MIPS_TLS_GD", Const, 6}, + {"R_MIPS_TLS_GOTTPREL", Const, 6}, + {"R_MIPS_TLS_LDM", Const, 6}, + {"R_MIPS_TLS_TPREL32", Const, 6}, + {"R_MIPS_TLS_TPREL64", Const, 6}, + {"R_MIPS_TLS_TPREL_HI16", Const, 6}, + {"R_MIPS_TLS_TPREL_LO16", Const, 6}, + {"R_PPC", Type, 0}, + {"R_PPC64", Type, 5}, + {"R_PPC64_ADDR14", Const, 5}, + {"R_PPC64_ADDR14_BRNTAKEN", Const, 5}, + {"R_PPC64_ADDR14_BRTAKEN", Const, 5}, + {"R_PPC64_ADDR16", Const, 5}, + {"R_PPC64_ADDR16_DS", Const, 5}, + {"R_PPC64_ADDR16_HA", Const, 5}, 
+ {"R_PPC64_ADDR16_HI", Const, 5}, + {"R_PPC64_ADDR16_HIGH", Const, 10}, + {"R_PPC64_ADDR16_HIGHA", Const, 10}, + {"R_PPC64_ADDR16_HIGHER", Const, 5}, + {"R_PPC64_ADDR16_HIGHER34", Const, 20}, + {"R_PPC64_ADDR16_HIGHERA", Const, 5}, + {"R_PPC64_ADDR16_HIGHERA34", Const, 20}, + {"R_PPC64_ADDR16_HIGHEST", Const, 5}, + {"R_PPC64_ADDR16_HIGHEST34", Const, 20}, + {"R_PPC64_ADDR16_HIGHESTA", Const, 5}, + {"R_PPC64_ADDR16_HIGHESTA34", Const, 20}, + {"R_PPC64_ADDR16_LO", Const, 5}, + {"R_PPC64_ADDR16_LO_DS", Const, 5}, + {"R_PPC64_ADDR24", Const, 5}, + {"R_PPC64_ADDR32", Const, 5}, + {"R_PPC64_ADDR64", Const, 5}, + {"R_PPC64_ADDR64_LOCAL", Const, 10}, + {"R_PPC64_COPY", Const, 20}, + {"R_PPC64_D28", Const, 20}, + {"R_PPC64_D34", Const, 20}, + {"R_PPC64_D34_HA30", Const, 20}, + {"R_PPC64_D34_HI30", Const, 20}, + {"R_PPC64_D34_LO", Const, 20}, + {"R_PPC64_DTPMOD64", Const, 5}, + {"R_PPC64_DTPREL16", Const, 5}, + {"R_PPC64_DTPREL16_DS", Const, 5}, + {"R_PPC64_DTPREL16_HA", Const, 5}, + {"R_PPC64_DTPREL16_HI", Const, 5}, + {"R_PPC64_DTPREL16_HIGH", Const, 10}, + {"R_PPC64_DTPREL16_HIGHA", Const, 10}, + {"R_PPC64_DTPREL16_HIGHER", Const, 5}, + {"R_PPC64_DTPREL16_HIGHERA", Const, 5}, + {"R_PPC64_DTPREL16_HIGHEST", Const, 5}, + {"R_PPC64_DTPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_DTPREL16_LO", Const, 5}, + {"R_PPC64_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_DTPREL34", Const, 20}, + {"R_PPC64_DTPREL64", Const, 5}, + {"R_PPC64_ENTRY", Const, 10}, + {"R_PPC64_GLOB_DAT", Const, 20}, + {"R_PPC64_GNU_VTENTRY", Const, 20}, + {"R_PPC64_GNU_VTINHERIT", Const, 20}, + {"R_PPC64_GOT16", Const, 5}, + {"R_PPC64_GOT16_DS", Const, 5}, + {"R_PPC64_GOT16_HA", Const, 5}, + {"R_PPC64_GOT16_HI", Const, 5}, + {"R_PPC64_GOT16_LO", Const, 5}, + {"R_PPC64_GOT16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HA", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HI", Const, 5}, + {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20}, + {"R_PPC64_GOT_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSGD16", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSGD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSLD16", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSLD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TPREL16_DS", Const, 5}, + {"R_PPC64_GOT_TPREL16_HA", Const, 5}, + {"R_PPC64_GOT_TPREL16_HI", Const, 5}, + {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_TPREL_PCREL34", Const, 20}, + {"R_PPC64_IRELATIVE", Const, 10}, + {"R_PPC64_JMP_IREL", Const, 10}, + {"R_PPC64_JMP_SLOT", Const, 5}, + {"R_PPC64_NONE", Const, 5}, + {"R_PPC64_PCREL28", Const, 20}, + {"R_PPC64_PCREL34", Const, 20}, + {"R_PPC64_PCREL_OPT", Const, 20}, + {"R_PPC64_PLT16_HA", Const, 20}, + {"R_PPC64_PLT16_HI", Const, 20}, + {"R_PPC64_PLT16_LO", Const, 20}, + {"R_PPC64_PLT16_LO_DS", Const, 10}, + {"R_PPC64_PLT32", Const, 20}, + {"R_PPC64_PLT64", Const, 20}, + {"R_PPC64_PLTCALL", Const, 20}, + {"R_PPC64_PLTCALL_NOTOC", Const, 20}, + {"R_PPC64_PLTGOT16", Const, 10}, + {"R_PPC64_PLTGOT16_DS", Const, 10}, + {"R_PPC64_PLTGOT16_HA", Const, 10}, + {"R_PPC64_PLTGOT16_HI", Const, 10}, + {"R_PPC64_PLTGOT16_LO", Const, 10}, + {"R_PPC64_PLTGOT_LO_DS", Const, 10}, + {"R_PPC64_PLTREL32", Const, 20}, + {"R_PPC64_PLTREL64", Const, 20}, + {"R_PPC64_PLTSEQ", Const, 20}, + {"R_PPC64_PLTSEQ_NOTOC", Const, 20}, + {"R_PPC64_PLT_PCREL34", Const, 20}, 
+ {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20}, + {"R_PPC64_REL14", Const, 5}, + {"R_PPC64_REL14_BRNTAKEN", Const, 5}, + {"R_PPC64_REL14_BRTAKEN", Const, 5}, + {"R_PPC64_REL16", Const, 5}, + {"R_PPC64_REL16DX_HA", Const, 10}, + {"R_PPC64_REL16_HA", Const, 5}, + {"R_PPC64_REL16_HI", Const, 5}, + {"R_PPC64_REL16_HIGH", Const, 20}, + {"R_PPC64_REL16_HIGHA", Const, 20}, + {"R_PPC64_REL16_HIGHER", Const, 20}, + {"R_PPC64_REL16_HIGHER34", Const, 20}, + {"R_PPC64_REL16_HIGHERA", Const, 20}, + {"R_PPC64_REL16_HIGHERA34", Const, 20}, + {"R_PPC64_REL16_HIGHEST", Const, 20}, + {"R_PPC64_REL16_HIGHEST34", Const, 20}, + {"R_PPC64_REL16_HIGHESTA", Const, 20}, + {"R_PPC64_REL16_HIGHESTA34", Const, 20}, + {"R_PPC64_REL16_LO", Const, 5}, + {"R_PPC64_REL24", Const, 5}, + {"R_PPC64_REL24_NOTOC", Const, 10}, + {"R_PPC64_REL24_P9NOTOC", Const, 21}, + {"R_PPC64_REL30", Const, 20}, + {"R_PPC64_REL32", Const, 5}, + {"R_PPC64_REL64", Const, 5}, + {"R_PPC64_RELATIVE", Const, 18}, + {"R_PPC64_SECTOFF", Const, 20}, + {"R_PPC64_SECTOFF_DS", Const, 10}, + {"R_PPC64_SECTOFF_HA", Const, 20}, + {"R_PPC64_SECTOFF_HI", Const, 20}, + {"R_PPC64_SECTOFF_LO", Const, 20}, + {"R_PPC64_SECTOFF_LO_DS", Const, 10}, + {"R_PPC64_TLS", Const, 5}, + {"R_PPC64_TLSGD", Const, 5}, + {"R_PPC64_TLSLD", Const, 5}, + {"R_PPC64_TOC", Const, 5}, + {"R_PPC64_TOC16", Const, 5}, + {"R_PPC64_TOC16_DS", Const, 5}, + {"R_PPC64_TOC16_HA", Const, 5}, + {"R_PPC64_TOC16_HI", Const, 5}, + {"R_PPC64_TOC16_LO", Const, 5}, + {"R_PPC64_TOC16_LO_DS", Const, 5}, + {"R_PPC64_TOCSAVE", Const, 10}, + {"R_PPC64_TPREL16", Const, 5}, + {"R_PPC64_TPREL16_DS", Const, 5}, + {"R_PPC64_TPREL16_HA", Const, 5}, + {"R_PPC64_TPREL16_HI", Const, 5}, + {"R_PPC64_TPREL16_HIGH", Const, 10}, + {"R_PPC64_TPREL16_HIGHA", Const, 10}, + {"R_PPC64_TPREL16_HIGHER", Const, 5}, + {"R_PPC64_TPREL16_HIGHERA", Const, 5}, + {"R_PPC64_TPREL16_HIGHEST", Const, 5}, + {"R_PPC64_TPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_TPREL16_LO", Const, 5}, + {"R_PPC64_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_TPREL34", Const, 20}, + {"R_PPC64_TPREL64", Const, 5}, + {"R_PPC64_UADDR16", Const, 20}, + {"R_PPC64_UADDR32", Const, 20}, + {"R_PPC64_UADDR64", Const, 20}, + {"R_PPC_ADDR14", Const, 0}, + {"R_PPC_ADDR14_BRNTAKEN", Const, 0}, + {"R_PPC_ADDR14_BRTAKEN", Const, 0}, + {"R_PPC_ADDR16", Const, 0}, + {"R_PPC_ADDR16_HA", Const, 0}, + {"R_PPC_ADDR16_HI", Const, 0}, + {"R_PPC_ADDR16_LO", Const, 0}, + {"R_PPC_ADDR24", Const, 0}, + {"R_PPC_ADDR32", Const, 0}, + {"R_PPC_COPY", Const, 0}, + {"R_PPC_DTPMOD32", Const, 0}, + {"R_PPC_DTPREL16", Const, 0}, + {"R_PPC_DTPREL16_HA", Const, 0}, + {"R_PPC_DTPREL16_HI", Const, 0}, + {"R_PPC_DTPREL16_LO", Const, 0}, + {"R_PPC_DTPREL32", Const, 0}, + {"R_PPC_EMB_BIT_FLD", Const, 0}, + {"R_PPC_EMB_MRKREF", Const, 0}, + {"R_PPC_EMB_NADDR16", Const, 0}, + {"R_PPC_EMB_NADDR16_HA", Const, 0}, + {"R_PPC_EMB_NADDR16_HI", Const, 0}, + {"R_PPC_EMB_NADDR16_LO", Const, 0}, + {"R_PPC_EMB_NADDR32", Const, 0}, + {"R_PPC_EMB_RELSDA", Const, 0}, + {"R_PPC_EMB_RELSEC16", Const, 0}, + {"R_PPC_EMB_RELST_HA", Const, 0}, + {"R_PPC_EMB_RELST_HI", Const, 0}, + {"R_PPC_EMB_RELST_LO", Const, 0}, + {"R_PPC_EMB_SDA21", Const, 0}, + {"R_PPC_EMB_SDA2I16", Const, 0}, + {"R_PPC_EMB_SDA2REL", Const, 0}, + {"R_PPC_EMB_SDAI16", Const, 0}, + {"R_PPC_GLOB_DAT", Const, 0}, + {"R_PPC_GOT16", Const, 0}, + {"R_PPC_GOT16_HA", Const, 0}, + {"R_PPC_GOT16_HI", Const, 0}, + {"R_PPC_GOT16_LO", Const, 0}, + {"R_PPC_GOT_TLSGD16", Const, 0}, + {"R_PPC_GOT_TLSGD16_HA", Const, 0}, + {"R_PPC_GOT_TLSGD16_HI", Const, 0}, + 
{"R_PPC_GOT_TLSGD16_LO", Const, 0}, + {"R_PPC_GOT_TLSLD16", Const, 0}, + {"R_PPC_GOT_TLSLD16_HA", Const, 0}, + {"R_PPC_GOT_TLSLD16_HI", Const, 0}, + {"R_PPC_GOT_TLSLD16_LO", Const, 0}, + {"R_PPC_GOT_TPREL16", Const, 0}, + {"R_PPC_GOT_TPREL16_HA", Const, 0}, + {"R_PPC_GOT_TPREL16_HI", Const, 0}, + {"R_PPC_GOT_TPREL16_LO", Const, 0}, + {"R_PPC_JMP_SLOT", Const, 0}, + {"R_PPC_LOCAL24PC", Const, 0}, + {"R_PPC_NONE", Const, 0}, + {"R_PPC_PLT16_HA", Const, 0}, + {"R_PPC_PLT16_HI", Const, 0}, + {"R_PPC_PLT16_LO", Const, 0}, + {"R_PPC_PLT32", Const, 0}, + {"R_PPC_PLTREL24", Const, 0}, + {"R_PPC_PLTREL32", Const, 0}, + {"R_PPC_REL14", Const, 0}, + {"R_PPC_REL14_BRNTAKEN", Const, 0}, + {"R_PPC_REL14_BRTAKEN", Const, 0}, + {"R_PPC_REL24", Const, 0}, + {"R_PPC_REL32", Const, 0}, + {"R_PPC_RELATIVE", Const, 0}, + {"R_PPC_SDAREL16", Const, 0}, + {"R_PPC_SECTOFF", Const, 0}, + {"R_PPC_SECTOFF_HA", Const, 0}, + {"R_PPC_SECTOFF_HI", Const, 0}, + {"R_PPC_SECTOFF_LO", Const, 0}, + {"R_PPC_TLS", Const, 0}, + {"R_PPC_TPREL16", Const, 0}, + {"R_PPC_TPREL16_HA", Const, 0}, + {"R_PPC_TPREL16_HI", Const, 0}, + {"R_PPC_TPREL16_LO", Const, 0}, + {"R_PPC_TPREL32", Const, 0}, + {"R_PPC_UADDR16", Const, 0}, + {"R_PPC_UADDR32", Const, 0}, + {"R_RISCV", Type, 11}, + {"R_RISCV_32", Const, 11}, + {"R_RISCV_32_PCREL", Const, 12}, + {"R_RISCV_64", Const, 11}, + {"R_RISCV_ADD16", Const, 11}, + {"R_RISCV_ADD32", Const, 11}, + {"R_RISCV_ADD64", Const, 11}, + {"R_RISCV_ADD8", Const, 11}, + {"R_RISCV_ALIGN", Const, 11}, + {"R_RISCV_BRANCH", Const, 11}, + {"R_RISCV_CALL", Const, 11}, + {"R_RISCV_CALL_PLT", Const, 11}, + {"R_RISCV_COPY", Const, 11}, + {"R_RISCV_GNU_VTENTRY", Const, 11}, + {"R_RISCV_GNU_VTINHERIT", Const, 11}, + {"R_RISCV_GOT_HI20", Const, 11}, + {"R_RISCV_GPREL_I", Const, 11}, + {"R_RISCV_GPREL_S", Const, 11}, + {"R_RISCV_HI20", Const, 11}, + {"R_RISCV_JAL", Const, 11}, + {"R_RISCV_JUMP_SLOT", Const, 11}, + {"R_RISCV_LO12_I", Const, 11}, + {"R_RISCV_LO12_S", Const, 11}, + {"R_RISCV_NONE", Const, 11}, + {"R_RISCV_PCREL_HI20", Const, 11}, + {"R_RISCV_PCREL_LO12_I", Const, 11}, + {"R_RISCV_PCREL_LO12_S", Const, 11}, + {"R_RISCV_RELATIVE", Const, 11}, + {"R_RISCV_RELAX", Const, 11}, + {"R_RISCV_RVC_BRANCH", Const, 11}, + {"R_RISCV_RVC_JUMP", Const, 11}, + {"R_RISCV_RVC_LUI", Const, 11}, + {"R_RISCV_SET16", Const, 11}, + {"R_RISCV_SET32", Const, 11}, + {"R_RISCV_SET6", Const, 11}, + {"R_RISCV_SET8", Const, 11}, + {"R_RISCV_SUB16", Const, 11}, + {"R_RISCV_SUB32", Const, 11}, + {"R_RISCV_SUB6", Const, 11}, + {"R_RISCV_SUB64", Const, 11}, + {"R_RISCV_SUB8", Const, 11}, + {"R_RISCV_TLS_DTPMOD32", Const, 11}, + {"R_RISCV_TLS_DTPMOD64", Const, 11}, + {"R_RISCV_TLS_DTPREL32", Const, 11}, + {"R_RISCV_TLS_DTPREL64", Const, 11}, + {"R_RISCV_TLS_GD_HI20", Const, 11}, + {"R_RISCV_TLS_GOT_HI20", Const, 11}, + {"R_RISCV_TLS_TPREL32", Const, 11}, + {"R_RISCV_TLS_TPREL64", Const, 11}, + {"R_RISCV_TPREL_ADD", Const, 11}, + {"R_RISCV_TPREL_HI20", Const, 11}, + {"R_RISCV_TPREL_I", Const, 11}, + {"R_RISCV_TPREL_LO12_I", Const, 11}, + {"R_RISCV_TPREL_LO12_S", Const, 11}, + {"R_RISCV_TPREL_S", Const, 11}, + {"R_SPARC", Type, 0}, + {"R_SPARC_10", Const, 0}, + {"R_SPARC_11", Const, 0}, + {"R_SPARC_13", Const, 0}, + {"R_SPARC_16", Const, 0}, + {"R_SPARC_22", Const, 0}, + {"R_SPARC_32", Const, 0}, + {"R_SPARC_5", Const, 0}, + {"R_SPARC_6", Const, 0}, + {"R_SPARC_64", Const, 0}, + {"R_SPARC_7", Const, 0}, + {"R_SPARC_8", Const, 0}, + {"R_SPARC_COPY", Const, 0}, + {"R_SPARC_DISP16", Const, 0}, + {"R_SPARC_DISP32", Const, 0}, + {"R_SPARC_DISP64", 
Const, 0}, + {"R_SPARC_DISP8", Const, 0}, + {"R_SPARC_GLOB_DAT", Const, 0}, + {"R_SPARC_GLOB_JMP", Const, 0}, + {"R_SPARC_GOT10", Const, 0}, + {"R_SPARC_GOT13", Const, 0}, + {"R_SPARC_GOT22", Const, 0}, + {"R_SPARC_H44", Const, 0}, + {"R_SPARC_HH22", Const, 0}, + {"R_SPARC_HI22", Const, 0}, + {"R_SPARC_HIPLT22", Const, 0}, + {"R_SPARC_HIX22", Const, 0}, + {"R_SPARC_HM10", Const, 0}, + {"R_SPARC_JMP_SLOT", Const, 0}, + {"R_SPARC_L44", Const, 0}, + {"R_SPARC_LM22", Const, 0}, + {"R_SPARC_LO10", Const, 0}, + {"R_SPARC_LOPLT10", Const, 0}, + {"R_SPARC_LOX10", Const, 0}, + {"R_SPARC_M44", Const, 0}, + {"R_SPARC_NONE", Const, 0}, + {"R_SPARC_OLO10", Const, 0}, + {"R_SPARC_PC10", Const, 0}, + {"R_SPARC_PC22", Const, 0}, + {"R_SPARC_PCPLT10", Const, 0}, + {"R_SPARC_PCPLT22", Const, 0}, + {"R_SPARC_PCPLT32", Const, 0}, + {"R_SPARC_PC_HH22", Const, 0}, + {"R_SPARC_PC_HM10", Const, 0}, + {"R_SPARC_PC_LM22", Const, 0}, + {"R_SPARC_PLT32", Const, 0}, + {"R_SPARC_PLT64", Const, 0}, + {"R_SPARC_REGISTER", Const, 0}, + {"R_SPARC_RELATIVE", Const, 0}, + {"R_SPARC_UA16", Const, 0}, + {"R_SPARC_UA32", Const, 0}, + {"R_SPARC_UA64", Const, 0}, + {"R_SPARC_WDISP16", Const, 0}, + {"R_SPARC_WDISP19", Const, 0}, + {"R_SPARC_WDISP22", Const, 0}, + {"R_SPARC_WDISP30", Const, 0}, + {"R_SPARC_WPLT30", Const, 0}, + {"R_SYM32", Func, 0}, + {"R_SYM64", Func, 0}, + {"R_TYPE32", Func, 0}, + {"R_TYPE64", Func, 0}, + {"R_X86_64", Type, 0}, + {"R_X86_64_16", Const, 0}, + {"R_X86_64_32", Const, 0}, + {"R_X86_64_32S", Const, 0}, + {"R_X86_64_64", Const, 0}, + {"R_X86_64_8", Const, 0}, + {"R_X86_64_COPY", Const, 0}, + {"R_X86_64_DTPMOD64", Const, 0}, + {"R_X86_64_DTPOFF32", Const, 0}, + {"R_X86_64_DTPOFF64", Const, 0}, + {"R_X86_64_GLOB_DAT", Const, 0}, + {"R_X86_64_GOT32", Const, 0}, + {"R_X86_64_GOT64", Const, 10}, + {"R_X86_64_GOTOFF64", Const, 10}, + {"R_X86_64_GOTPC32", Const, 10}, + {"R_X86_64_GOTPC32_TLSDESC", Const, 10}, + {"R_X86_64_GOTPC64", Const, 10}, + {"R_X86_64_GOTPCREL", Const, 0}, + {"R_X86_64_GOTPCREL64", Const, 10}, + {"R_X86_64_GOTPCRELX", Const, 10}, + {"R_X86_64_GOTPLT64", Const, 10}, + {"R_X86_64_GOTTPOFF", Const, 0}, + {"R_X86_64_IRELATIVE", Const, 10}, + {"R_X86_64_JMP_SLOT", Const, 0}, + {"R_X86_64_NONE", Const, 0}, + {"R_X86_64_PC16", Const, 0}, + {"R_X86_64_PC32", Const, 0}, + {"R_X86_64_PC32_BND", Const, 10}, + {"R_X86_64_PC64", Const, 10}, + {"R_X86_64_PC8", Const, 0}, + {"R_X86_64_PLT32", Const, 0}, + {"R_X86_64_PLT32_BND", Const, 10}, + {"R_X86_64_PLTOFF64", Const, 10}, + {"R_X86_64_RELATIVE", Const, 0}, + {"R_X86_64_RELATIVE64", Const, 10}, + {"R_X86_64_REX_GOTPCRELX", Const, 10}, + {"R_X86_64_SIZE32", Const, 10}, + {"R_X86_64_SIZE64", Const, 10}, + {"R_X86_64_TLSDESC", Const, 10}, + {"R_X86_64_TLSDESC_CALL", Const, 10}, + {"R_X86_64_TLSGD", Const, 0}, + {"R_X86_64_TLSLD", Const, 0}, + {"R_X86_64_TPOFF32", Const, 0}, + {"R_X86_64_TPOFF64", Const, 0}, + {"Rel32", Type, 0}, + {"Rel32.Info", Field, 0}, + {"Rel32.Off", Field, 0}, + {"Rel64", Type, 0}, + {"Rel64.Info", Field, 0}, + {"Rel64.Off", Field, 0}, + {"Rela32", Type, 0}, + {"Rela32.Addend", Field, 0}, + {"Rela32.Info", Field, 0}, + {"Rela32.Off", Field, 0}, + {"Rela64", Type, 0}, + {"Rela64.Addend", Field, 0}, + {"Rela64.Info", Field, 0}, + {"Rela64.Off", Field, 0}, + {"SHF_ALLOC", Const, 0}, + {"SHF_COMPRESSED", Const, 6}, + {"SHF_EXECINSTR", Const, 0}, + {"SHF_GROUP", Const, 0}, + {"SHF_INFO_LINK", Const, 0}, + {"SHF_LINK_ORDER", Const, 0}, + {"SHF_MASKOS", Const, 0}, + {"SHF_MASKPROC", Const, 0}, + {"SHF_MERGE", Const, 0}, + 
{"SHF_OS_NONCONFORMING", Const, 0}, + {"SHF_STRINGS", Const, 0}, + {"SHF_TLS", Const, 0}, + {"SHF_WRITE", Const, 0}, + {"SHN_ABS", Const, 0}, + {"SHN_COMMON", Const, 0}, + {"SHN_HIOS", Const, 0}, + {"SHN_HIPROC", Const, 0}, + {"SHN_HIRESERVE", Const, 0}, + {"SHN_LOOS", Const, 0}, + {"SHN_LOPROC", Const, 0}, + {"SHN_LORESERVE", Const, 0}, + {"SHN_UNDEF", Const, 0}, + {"SHN_XINDEX", Const, 0}, + {"SHT_DYNAMIC", Const, 0}, + {"SHT_DYNSYM", Const, 0}, + {"SHT_FINI_ARRAY", Const, 0}, + {"SHT_GNU_ATTRIBUTES", Const, 0}, + {"SHT_GNU_HASH", Const, 0}, + {"SHT_GNU_LIBLIST", Const, 0}, + {"SHT_GNU_VERDEF", Const, 0}, + {"SHT_GNU_VERNEED", Const, 0}, + {"SHT_GNU_VERSYM", Const, 0}, + {"SHT_GROUP", Const, 0}, + {"SHT_HASH", Const, 0}, + {"SHT_HIOS", Const, 0}, + {"SHT_HIPROC", Const, 0}, + {"SHT_HIUSER", Const, 0}, + {"SHT_INIT_ARRAY", Const, 0}, + {"SHT_LOOS", Const, 0}, + {"SHT_LOPROC", Const, 0}, + {"SHT_LOUSER", Const, 0}, + {"SHT_MIPS_ABIFLAGS", Const, 17}, + {"SHT_NOBITS", Const, 0}, + {"SHT_NOTE", Const, 0}, + {"SHT_NULL", Const, 0}, + {"SHT_PREINIT_ARRAY", Const, 0}, + {"SHT_PROGBITS", Const, 0}, + {"SHT_REL", Const, 0}, + {"SHT_RELA", Const, 0}, + {"SHT_SHLIB", Const, 0}, + {"SHT_STRTAB", Const, 0}, + {"SHT_SYMTAB", Const, 0}, + {"SHT_SYMTAB_SHNDX", Const, 0}, + {"STB_GLOBAL", Const, 0}, + {"STB_HIOS", Const, 0}, + {"STB_HIPROC", Const, 0}, + {"STB_LOCAL", Const, 0}, + {"STB_LOOS", Const, 0}, + {"STB_LOPROC", Const, 0}, + {"STB_WEAK", Const, 0}, + {"STT_COMMON", Const, 0}, + {"STT_FILE", Const, 0}, + {"STT_FUNC", Const, 0}, + {"STT_HIOS", Const, 0}, + {"STT_HIPROC", Const, 0}, + {"STT_LOOS", Const, 0}, + {"STT_LOPROC", Const, 0}, + {"STT_NOTYPE", Const, 0}, + {"STT_OBJECT", Const, 0}, + {"STT_SECTION", Const, 0}, + {"STT_TLS", Const, 0}, + {"STV_DEFAULT", Const, 0}, + {"STV_HIDDEN", Const, 0}, + {"STV_INTERNAL", Const, 0}, + {"STV_PROTECTED", Const, 0}, + {"ST_BIND", Func, 0}, + {"ST_INFO", Func, 0}, + {"ST_TYPE", Func, 0}, + {"ST_VISIBILITY", Func, 0}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Addralign", Field, 0}, + {"Section32.Entsize", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Info", Field, 0}, + {"Section32.Link", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Off", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section32.Type", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Addralign", Field, 0}, + {"Section64.Entsize", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Info", Field, 0}, + {"Section64.Link", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Off", Field, 0}, + {"Section64.Size", Field, 0}, + {"Section64.Type", Field, 0}, + {"SectionFlag", Type, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Addralign", Field, 0}, + {"SectionHeader.Entsize", Field, 0}, + {"SectionHeader.FileSize", Field, 6}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Info", Field, 0}, + {"SectionHeader.Link", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.Type", Field, 0}, + {"SectionIndex", Type, 0}, + {"SectionType", Type, 0}, + {"Sym32", Type, 0}, + {"Sym32.Info", Field, 0}, + {"Sym32.Name", Field, 0}, + {"Sym32.Other", Field, 0}, + {"Sym32.Shndx", Field, 0}, + {"Sym32.Size", Field, 0}, + {"Sym32.Value", Field, 0}, + {"Sym32Size", Const, 0}, + {"Sym64", Type, 
0}, + {"Sym64.Info", Field, 0}, + {"Sym64.Name", Field, 0}, + {"Sym64.Other", Field, 0}, + {"Sym64.Shndx", Field, 0}, + {"Sym64.Size", Field, 0}, + {"Sym64.Value", Field, 0}, + {"Sym64Size", Const, 0}, + {"SymBind", Type, 0}, + {"SymType", Type, 0}, + {"SymVis", Type, 0}, + {"Symbol", Type, 0}, + {"Symbol.Info", Field, 0}, + {"Symbol.Library", Field, 13}, + {"Symbol.Name", Field, 0}, + {"Symbol.Other", Field, 0}, + {"Symbol.Section", Field, 0}, + {"Symbol.Size", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symbol.Version", Field, 13}, + {"Type", Type, 0}, + {"Version", Type, 0}, + }, + "debug/gosym": { + {"(*DecodingError).Error", Method, 0}, + {"(*LineTable).LineToPC", Method, 0}, + {"(*LineTable).PCToLine", Method, 0}, + {"(*Sym).BaseName", Method, 0}, + {"(*Sym).PackageName", Method, 0}, + {"(*Sym).ReceiverName", Method, 0}, + {"(*Sym).Static", Method, 0}, + {"(*Table).LineToPC", Method, 0}, + {"(*Table).LookupFunc", Method, 0}, + {"(*Table).LookupSym", Method, 0}, + {"(*Table).PCToFunc", Method, 0}, + {"(*Table).PCToLine", Method, 0}, + {"(*Table).SymByAddr", Method, 0}, + {"(*UnknownLineError).Error", Method, 0}, + {"(Func).BaseName", Method, 0}, + {"(Func).PackageName", Method, 0}, + {"(Func).ReceiverName", Method, 0}, + {"(Func).Static", Method, 0}, + {"(UnknownFileError).Error", Method, 0}, + {"DecodingError", Type, 0}, + {"Func", Type, 0}, + {"Func.End", Field, 0}, + {"Func.Entry", Field, 0}, + {"Func.FrameSize", Field, 0}, + {"Func.LineTable", Field, 0}, + {"Func.Locals", Field, 0}, + {"Func.Obj", Field, 0}, + {"Func.Params", Field, 0}, + {"Func.Sym", Field, 0}, + {"LineTable", Type, 0}, + {"LineTable.Data", Field, 0}, + {"LineTable.Line", Field, 0}, + {"LineTable.PC", Field, 0}, + {"NewLineTable", Func, 0}, + {"NewTable", Func, 0}, + {"Obj", Type, 0}, + {"Obj.Funcs", Field, 0}, + {"Obj.Paths", Field, 0}, + {"Sym", Type, 0}, + {"Sym.Func", Field, 0}, + {"Sym.GoType", Field, 0}, + {"Sym.Name", Field, 0}, + {"Sym.Type", Field, 0}, + {"Sym.Value", Field, 0}, + {"Table", Type, 0}, + {"Table.Files", Field, 0}, + {"Table.Funcs", Field, 0}, + {"Table.Objs", Field, 0}, + {"Table.Syms", Field, 0}, + {"UnknownFileError", Type, 0}, + {"UnknownLineError", Type, 0}, + {"UnknownLineError.File", Field, 0}, + {"UnknownLineError.Line", Field, 0}, + }, + "debug/macho": { + {"(*FatFile).Close", Method, 3}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).Segment", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(*Segment).Data", Method, 0}, + {"(*Segment).Open", Method, 0}, + {"(Cpu).GoString", Method, 0}, + {"(Cpu).String", Method, 0}, + {"(Dylib).Raw", Method, 0}, + {"(Dysymtab).Raw", Method, 0}, + {"(FatArch).Close", Method, 3}, + {"(FatArch).DWARF", Method, 3}, + {"(FatArch).ImportedLibraries", Method, 3}, + {"(FatArch).ImportedSymbols", Method, 3}, + {"(FatArch).Section", Method, 3}, + {"(FatArch).Segment", Method, 3}, + {"(LoadBytes).Raw", Method, 0}, + {"(LoadCmd).GoString", Method, 0}, + {"(LoadCmd).String", Method, 0}, + {"(RelocTypeARM).GoString", Method, 10}, + {"(RelocTypeARM).String", Method, 10}, + {"(RelocTypeARM64).GoString", Method, 10}, + {"(RelocTypeARM64).String", Method, 10}, + {"(RelocTypeGeneric).GoString", Method, 10}, + {"(RelocTypeGeneric).String", Method, 10}, + {"(RelocTypeX86_64).GoString", Method, 10}, + {"(RelocTypeX86_64).String", Method, 10}, + 
{"(Rpath).Raw", Method, 10}, + {"(Section).ReadAt", Method, 0}, + {"(Segment).Raw", Method, 0}, + {"(Segment).ReadAt", Method, 0}, + {"(Symtab).Raw", Method, 0}, + {"(Type).GoString", Method, 10}, + {"(Type).String", Method, 10}, + {"ARM64_RELOC_ADDEND", Const, 10}, + {"ARM64_RELOC_BRANCH26", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_PAGE21", Const, 10}, + {"ARM64_RELOC_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_POINTER_TO_GOT", Const, 10}, + {"ARM64_RELOC_SUBTRACTOR", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_UNSIGNED", Const, 10}, + {"ARM_RELOC_BR24", Const, 10}, + {"ARM_RELOC_HALF", Const, 10}, + {"ARM_RELOC_HALF_SECTDIFF", Const, 10}, + {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"ARM_RELOC_PAIR", Const, 10}, + {"ARM_RELOC_PB_LA_PTR", Const, 10}, + {"ARM_RELOC_SECTDIFF", Const, 10}, + {"ARM_RELOC_VANILLA", Const, 10}, + {"ARM_THUMB_32BIT_BRANCH", Const, 10}, + {"ARM_THUMB_RELOC_BR22", Const, 10}, + {"Cpu", Type, 0}, + {"Cpu386", Const, 0}, + {"CpuAmd64", Const, 0}, + {"CpuArm", Const, 3}, + {"CpuArm64", Const, 11}, + {"CpuPpc", Const, 3}, + {"CpuPpc64", Const, 3}, + {"Dylib", Type, 0}, + {"Dylib.CompatVersion", Field, 0}, + {"Dylib.CurrentVersion", Field, 0}, + {"Dylib.LoadBytes", Field, 0}, + {"Dylib.Name", Field, 0}, + {"Dylib.Time", Field, 0}, + {"DylibCmd", Type, 0}, + {"DylibCmd.Cmd", Field, 0}, + {"DylibCmd.CompatVersion", Field, 0}, + {"DylibCmd.CurrentVersion", Field, 0}, + {"DylibCmd.Len", Field, 0}, + {"DylibCmd.Name", Field, 0}, + {"DylibCmd.Time", Field, 0}, + {"Dysymtab", Type, 0}, + {"Dysymtab.DysymtabCmd", Field, 0}, + {"Dysymtab.IndirectSyms", Field, 0}, + {"Dysymtab.LoadBytes", Field, 0}, + {"DysymtabCmd", Type, 0}, + {"DysymtabCmd.Cmd", Field, 0}, + {"DysymtabCmd.Extrefsymoff", Field, 0}, + {"DysymtabCmd.Extreloff", Field, 0}, + {"DysymtabCmd.Iextdefsym", Field, 0}, + {"DysymtabCmd.Ilocalsym", Field, 0}, + {"DysymtabCmd.Indirectsymoff", Field, 0}, + {"DysymtabCmd.Iundefsym", Field, 0}, + {"DysymtabCmd.Len", Field, 0}, + {"DysymtabCmd.Locreloff", Field, 0}, + {"DysymtabCmd.Modtaboff", Field, 0}, + {"DysymtabCmd.Nextdefsym", Field, 0}, + {"DysymtabCmd.Nextrefsyms", Field, 0}, + {"DysymtabCmd.Nextrel", Field, 0}, + {"DysymtabCmd.Nindirectsyms", Field, 0}, + {"DysymtabCmd.Nlocalsym", Field, 0}, + {"DysymtabCmd.Nlocrel", Field, 0}, + {"DysymtabCmd.Nmodtab", Field, 0}, + {"DysymtabCmd.Ntoc", Field, 0}, + {"DysymtabCmd.Nundefsym", Field, 0}, + {"DysymtabCmd.Tocoffset", Field, 0}, + {"ErrNotFat", Var, 3}, + {"FatArch", Type, 3}, + {"FatArch.FatArchHeader", Field, 3}, + {"FatArch.File", Field, 3}, + {"FatArchHeader", Type, 3}, + {"FatArchHeader.Align", Field, 3}, + {"FatArchHeader.Cpu", Field, 3}, + {"FatArchHeader.Offset", Field, 3}, + {"FatArchHeader.Size", Field, 3}, + {"FatArchHeader.SubCpu", Field, 3}, + {"FatFile", Type, 3}, + {"FatFile.Arches", Field, 3}, + {"FatFile.Magic", Field, 3}, + {"File", Type, 0}, + {"File.ByteOrder", Field, 0}, + {"File.Dysymtab", Field, 0}, + {"File.FileHeader", Field, 0}, + {"File.Loads", Field, 0}, + {"File.Sections", Field, 0}, + {"File.Symtab", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Cmdsz", Field, 0}, + {"FileHeader.Cpu", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Magic", Field, 0}, + {"FileHeader.Ncmd", Field, 0}, + {"FileHeader.SubCpu", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FlagAllModsBound", Const, 10}, + {"FlagAllowStackExecution", 
Const, 10}, + {"FlagAppExtensionSafe", Const, 10}, + {"FlagBindAtLoad", Const, 10}, + {"FlagBindsToWeak", Const, 10}, + {"FlagCanonical", Const, 10}, + {"FlagDeadStrippableDylib", Const, 10}, + {"FlagDyldLink", Const, 10}, + {"FlagForceFlat", Const, 10}, + {"FlagHasTLVDescriptors", Const, 10}, + {"FlagIncrLink", Const, 10}, + {"FlagLazyInit", Const, 10}, + {"FlagNoFixPrebinding", Const, 10}, + {"FlagNoHeapExecution", Const, 10}, + {"FlagNoMultiDefs", Const, 10}, + {"FlagNoReexportedDylibs", Const, 10}, + {"FlagNoUndefs", Const, 10}, + {"FlagPIE", Const, 10}, + {"FlagPrebindable", Const, 10}, + {"FlagPrebound", Const, 10}, + {"FlagRootSafe", Const, 10}, + {"FlagSetuidSafe", Const, 10}, + {"FlagSplitSegs", Const, 10}, + {"FlagSubsectionsViaSymbols", Const, 10}, + {"FlagTwoLevel", Const, 10}, + {"FlagWeakDefines", Const, 10}, + {"FormatError", Type, 0}, + {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_PAIR", Const, 10}, + {"GENERIC_RELOC_PB_LA_PTR", Const, 10}, + {"GENERIC_RELOC_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_TLV", Const, 10}, + {"GENERIC_RELOC_VANILLA", Const, 10}, + {"Load", Type, 0}, + {"LoadBytes", Type, 0}, + {"LoadCmd", Type, 0}, + {"LoadCmdDylib", Const, 0}, + {"LoadCmdDylinker", Const, 0}, + {"LoadCmdDysymtab", Const, 0}, + {"LoadCmdRpath", Const, 10}, + {"LoadCmdSegment", Const, 0}, + {"LoadCmdSegment64", Const, 0}, + {"LoadCmdSymtab", Const, 0}, + {"LoadCmdThread", Const, 0}, + {"LoadCmdUnixThread", Const, 0}, + {"Magic32", Const, 0}, + {"Magic64", Const, 0}, + {"MagicFat", Const, 3}, + {"NewFatFile", Func, 3}, + {"NewFile", Func, 0}, + {"Nlist32", Type, 0}, + {"Nlist32.Desc", Field, 0}, + {"Nlist32.Name", Field, 0}, + {"Nlist32.Sect", Field, 0}, + {"Nlist32.Type", Field, 0}, + {"Nlist32.Value", Field, 0}, + {"Nlist64", Type, 0}, + {"Nlist64.Desc", Field, 0}, + {"Nlist64.Name", Field, 0}, + {"Nlist64.Sect", Field, 0}, + {"Nlist64.Type", Field, 0}, + {"Nlist64.Value", Field, 0}, + {"Open", Func, 0}, + {"OpenFat", Func, 3}, + {"Regs386", Type, 0}, + {"Regs386.AX", Field, 0}, + {"Regs386.BP", Field, 0}, + {"Regs386.BX", Field, 0}, + {"Regs386.CS", Field, 0}, + {"Regs386.CX", Field, 0}, + {"Regs386.DI", Field, 0}, + {"Regs386.DS", Field, 0}, + {"Regs386.DX", Field, 0}, + {"Regs386.ES", Field, 0}, + {"Regs386.FLAGS", Field, 0}, + {"Regs386.FS", Field, 0}, + {"Regs386.GS", Field, 0}, + {"Regs386.IP", Field, 0}, + {"Regs386.SI", Field, 0}, + {"Regs386.SP", Field, 0}, + {"Regs386.SS", Field, 0}, + {"RegsAMD64", Type, 0}, + {"RegsAMD64.AX", Field, 0}, + {"RegsAMD64.BP", Field, 0}, + {"RegsAMD64.BX", Field, 0}, + {"RegsAMD64.CS", Field, 0}, + {"RegsAMD64.CX", Field, 0}, + {"RegsAMD64.DI", Field, 0}, + {"RegsAMD64.DX", Field, 0}, + {"RegsAMD64.FLAGS", Field, 0}, + {"RegsAMD64.FS", Field, 0}, + {"RegsAMD64.GS", Field, 0}, + {"RegsAMD64.IP", Field, 0}, + {"RegsAMD64.R10", Field, 0}, + {"RegsAMD64.R11", Field, 0}, + {"RegsAMD64.R12", Field, 0}, + {"RegsAMD64.R13", Field, 0}, + {"RegsAMD64.R14", Field, 0}, + {"RegsAMD64.R15", Field, 0}, + {"RegsAMD64.R8", Field, 0}, + {"RegsAMD64.R9", Field, 0}, + {"RegsAMD64.SI", Field, 0}, + {"RegsAMD64.SP", Field, 0}, + {"Reloc", Type, 10}, + {"Reloc.Addr", Field, 10}, + {"Reloc.Extern", Field, 10}, + {"Reloc.Len", Field, 10}, + {"Reloc.Pcrel", Field, 10}, + {"Reloc.Scattered", Field, 10}, + {"Reloc.Type", Field, 10}, + {"Reloc.Value", Field, 10}, + {"RelocTypeARM", Type, 10}, + {"RelocTypeARM64", Type, 10}, + {"RelocTypeGeneric", Type, 10}, + {"RelocTypeX86_64", Type, 10}, + {"Rpath", Type, 10}, + {"Rpath.LoadBytes", Field, 10}, + 
{"Rpath.Path", Field, 10}, + {"RpathCmd", Type, 10}, + {"RpathCmd.Cmd", Field, 10}, + {"RpathCmd.Len", Field, 10}, + {"RpathCmd.Path", Field, 10}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 10}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Align", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Nreloc", Field, 0}, + {"Section32.Offset", Field, 0}, + {"Section32.Reloff", Field, 0}, + {"Section32.Reserve1", Field, 0}, + {"Section32.Reserve2", Field, 0}, + {"Section32.Seg", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Align", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Nreloc", Field, 0}, + {"Section64.Offset", Field, 0}, + {"Section64.Reloff", Field, 0}, + {"Section64.Reserve1", Field, 0}, + {"Section64.Reserve2", Field, 0}, + {"Section64.Reserve3", Field, 0}, + {"Section64.Seg", Field, 0}, + {"Section64.Size", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Align", Field, 0}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Nreloc", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Reloff", Field, 0}, + {"SectionHeader.Seg", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"Segment", Type, 0}, + {"Segment.LoadBytes", Field, 0}, + {"Segment.ReaderAt", Field, 0}, + {"Segment.SegmentHeader", Field, 0}, + {"Segment32", Type, 0}, + {"Segment32.Addr", Field, 0}, + {"Segment32.Cmd", Field, 0}, + {"Segment32.Filesz", Field, 0}, + {"Segment32.Flag", Field, 0}, + {"Segment32.Len", Field, 0}, + {"Segment32.Maxprot", Field, 0}, + {"Segment32.Memsz", Field, 0}, + {"Segment32.Name", Field, 0}, + {"Segment32.Nsect", Field, 0}, + {"Segment32.Offset", Field, 0}, + {"Segment32.Prot", Field, 0}, + {"Segment64", Type, 0}, + {"Segment64.Addr", Field, 0}, + {"Segment64.Cmd", Field, 0}, + {"Segment64.Filesz", Field, 0}, + {"Segment64.Flag", Field, 0}, + {"Segment64.Len", Field, 0}, + {"Segment64.Maxprot", Field, 0}, + {"Segment64.Memsz", Field, 0}, + {"Segment64.Name", Field, 0}, + {"Segment64.Nsect", Field, 0}, + {"Segment64.Offset", Field, 0}, + {"Segment64.Prot", Field, 0}, + {"SegmentHeader", Type, 0}, + {"SegmentHeader.Addr", Field, 0}, + {"SegmentHeader.Cmd", Field, 0}, + {"SegmentHeader.Filesz", Field, 0}, + {"SegmentHeader.Flag", Field, 0}, + {"SegmentHeader.Len", Field, 0}, + {"SegmentHeader.Maxprot", Field, 0}, + {"SegmentHeader.Memsz", Field, 0}, + {"SegmentHeader.Name", Field, 0}, + {"SegmentHeader.Nsect", Field, 0}, + {"SegmentHeader.Offset", Field, 0}, + {"SegmentHeader.Prot", Field, 0}, + {"Symbol", Type, 0}, + {"Symbol.Desc", Field, 0}, + {"Symbol.Name", Field, 0}, + {"Symbol.Sect", Field, 0}, + {"Symbol.Type", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symtab", Type, 0}, + {"Symtab.LoadBytes", Field, 0}, + {"Symtab.Syms", Field, 0}, + {"Symtab.SymtabCmd", Field, 0}, + {"SymtabCmd", Type, 0}, + {"SymtabCmd.Cmd", Field, 0}, + {"SymtabCmd.Len", Field, 0}, + {"SymtabCmd.Nsyms", Field, 0}, + {"SymtabCmd.Stroff", Field, 0}, + {"SymtabCmd.Strsize", Field, 0}, + {"SymtabCmd.Symoff", Field, 0}, + {"Thread", Type, 0}, + {"Thread.Cmd", Field, 0}, + {"Thread.Data", Field, 0}, + {"Thread.Len", Field, 0}, + {"Thread.Type", Field, 0}, + {"Type", Type, 0}, + {"TypeBundle", Const, 3}, + {"TypeDylib", Const, 3}, + {"TypeExec", Const, 0}, + 
{"TypeObj", Const, 0}, + {"X86_64_RELOC_BRANCH", Const, 10}, + {"X86_64_RELOC_GOT", Const, 10}, + {"X86_64_RELOC_GOT_LOAD", Const, 10}, + {"X86_64_RELOC_SIGNED", Const, 10}, + {"X86_64_RELOC_SIGNED_1", Const, 10}, + {"X86_64_RELOC_SIGNED_2", Const, 10}, + {"X86_64_RELOC_SIGNED_4", Const, 10}, + {"X86_64_RELOC_SUBTRACTOR", Const, 10}, + {"X86_64_RELOC_TLV", Const, 10}, + {"X86_64_RELOC_UNSIGNED", Const, 10}, + }, + "debug/pe": { + {"(*COFFSymbol).FullName", Method, 8}, + {"(*File).COFFSymbolReadSectionDefAux", Method, 19}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Section).ReadAt", Method, 0}, + {"(StringTable).String", Method, 8}, + {"COFFSymbol", Type, 1}, + {"COFFSymbol.Name", Field, 1}, + {"COFFSymbol.NumberOfAuxSymbols", Field, 1}, + {"COFFSymbol.SectionNumber", Field, 1}, + {"COFFSymbol.StorageClass", Field, 1}, + {"COFFSymbol.Type", Field, 1}, + {"COFFSymbol.Value", Field, 1}, + {"COFFSymbolAuxFormat5", Type, 19}, + {"COFFSymbolAuxFormat5.Checksum", Field, 19}, + {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19}, + {"COFFSymbolAuxFormat5.NumRelocs", Field, 19}, + {"COFFSymbolAuxFormat5.SecNum", Field, 19}, + {"COFFSymbolAuxFormat5.Selection", Field, 19}, + {"COFFSymbolAuxFormat5.Size", Field, 19}, + {"COFFSymbolSize", Const, 1}, + {"DataDirectory", Type, 3}, + {"DataDirectory.Size", Field, 3}, + {"DataDirectory.VirtualAddress", Field, 3}, + {"File", Type, 0}, + {"File.COFFSymbols", Field, 8}, + {"File.FileHeader", Field, 0}, + {"File.OptionalHeader", Field, 3}, + {"File.Sections", Field, 0}, + {"File.StringTable", Field, 8}, + {"File.Symbols", Field, 1}, + {"FileHeader", Type, 0}, + {"FileHeader.Characteristics", Field, 0}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.NumberOfSections", Field, 0}, + {"FileHeader.NumberOfSymbols", Field, 0}, + {"FileHeader.PointerToSymbolTable", Field, 0}, + {"FileHeader.SizeOfOptionalHeader", Field, 0}, + {"FileHeader.TimeDateStamp", Field, 0}, + {"FormatError", Type, 0}, + {"IMAGE_COMDAT_SELECT_ANY", Const, 19}, + {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19}, + {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19}, + {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19}, + {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19}, + {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19}, + {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11}, + {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15}, + 
{"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15}, + {"IMAGE_FILE_32BIT_MACHINE", Const, 15}, + {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15}, + {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15}, + {"IMAGE_FILE_DLL", Const, 15}, + {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15}, + {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15}, + {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_MACHINE_AM33", Const, 0}, + {"IMAGE_FILE_MACHINE_AMD64", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM64", Const, 11}, + {"IMAGE_FILE_MACHINE_ARMNT", Const, 12}, + {"IMAGE_FILE_MACHINE_EBC", Const, 0}, + {"IMAGE_FILE_MACHINE_I386", Const, 0}, + {"IMAGE_FILE_MACHINE_IA64", Const, 0}, + {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19}, + {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19}, + {"IMAGE_FILE_MACHINE_M32R", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPS16", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPC", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0}, + {"IMAGE_FILE_MACHINE_R4000", Const, 0}, + {"IMAGE_FILE_MACHINE_RISCV128", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV32", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV64", Const, 20}, + {"IMAGE_FILE_MACHINE_SH3", Const, 0}, + {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0}, + {"IMAGE_FILE_MACHINE_SH4", Const, 0}, + {"IMAGE_FILE_MACHINE_SH5", Const, 0}, + {"IMAGE_FILE_MACHINE_THUMB", Const, 0}, + {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0}, + {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0}, + {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15}, + {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_SYSTEM", Const, 15}, + {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15}, + {"IMAGE_SCN_CNT_CODE", Const, 19}, + {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_LNK_COMDAT", Const, 19}, + {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19}, + {"IMAGE_SCN_MEM_EXECUTE", Const, 19}, + {"IMAGE_SCN_MEM_READ", Const, 19}, + {"IMAGE_SCN_MEM_WRITE", Const, 19}, + {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15}, + {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_XBOX", Const, 15}, + {"ImportDirectory", Type, 0}, + {"ImportDirectory.FirstThunk", Field, 0}, + {"ImportDirectory.ForwarderChain", Field, 0}, + {"ImportDirectory.Name", Field, 0}, + {"ImportDirectory.OriginalFirstThunk", Field, 0}, + {"ImportDirectory.TimeDateStamp", Field, 0}, + {"NewFile", Func, 0}, + {"Open", Func, 0}, + {"OptionalHeader32", Type, 3}, + {"OptionalHeader32.AddressOfEntryPoint", Field, 
3}, + {"OptionalHeader32.BaseOfCode", Field, 3}, + {"OptionalHeader32.BaseOfData", Field, 3}, + {"OptionalHeader32.CheckSum", Field, 3}, + {"OptionalHeader32.DataDirectory", Field, 3}, + {"OptionalHeader32.DllCharacteristics", Field, 3}, + {"OptionalHeader32.FileAlignment", Field, 3}, + {"OptionalHeader32.ImageBase", Field, 3}, + {"OptionalHeader32.LoaderFlags", Field, 3}, + {"OptionalHeader32.Magic", Field, 3}, + {"OptionalHeader32.MajorImageVersion", Field, 3}, + {"OptionalHeader32.MajorLinkerVersion", Field, 3}, + {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader32.MinorImageVersion", Field, 3}, + {"OptionalHeader32.MinorLinkerVersion", Field, 3}, + {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader32.SectionAlignment", Field, 3}, + {"OptionalHeader32.SizeOfCode", Field, 3}, + {"OptionalHeader32.SizeOfHeaders", Field, 3}, + {"OptionalHeader32.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader32.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader32.SizeOfImage", Field, 3}, + {"OptionalHeader32.SizeOfInitializedData", Field, 3}, + {"OptionalHeader32.SizeOfStackCommit", Field, 3}, + {"OptionalHeader32.SizeOfStackReserve", Field, 3}, + {"OptionalHeader32.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader32.Subsystem", Field, 3}, + {"OptionalHeader32.Win32VersionValue", Field, 3}, + {"OptionalHeader64", Type, 3}, + {"OptionalHeader64.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader64.BaseOfCode", Field, 3}, + {"OptionalHeader64.CheckSum", Field, 3}, + {"OptionalHeader64.DataDirectory", Field, 3}, + {"OptionalHeader64.DllCharacteristics", Field, 3}, + {"OptionalHeader64.FileAlignment", Field, 3}, + {"OptionalHeader64.ImageBase", Field, 3}, + {"OptionalHeader64.LoaderFlags", Field, 3}, + {"OptionalHeader64.Magic", Field, 3}, + {"OptionalHeader64.MajorImageVersion", Field, 3}, + {"OptionalHeader64.MajorLinkerVersion", Field, 3}, + {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader64.MinorImageVersion", Field, 3}, + {"OptionalHeader64.MinorLinkerVersion", Field, 3}, + {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader64.SectionAlignment", Field, 3}, + {"OptionalHeader64.SizeOfCode", Field, 3}, + {"OptionalHeader64.SizeOfHeaders", Field, 3}, + {"OptionalHeader64.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader64.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader64.SizeOfImage", Field, 3}, + {"OptionalHeader64.SizeOfInitializedData", Field, 3}, + {"OptionalHeader64.SizeOfStackCommit", Field, 3}, + {"OptionalHeader64.SizeOfStackReserve", Field, 3}, + {"OptionalHeader64.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader64.Subsystem", Field, 3}, + {"OptionalHeader64.Win32VersionValue", Field, 3}, + {"Reloc", Type, 8}, + {"Reloc.SymbolTableIndex", Field, 8}, + {"Reloc.Type", Field, 8}, + {"Reloc.VirtualAddress", Field, 8}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 8}, + {"Section.SectionHeader", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Characteristics", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.NumberOfLineNumbers", Field, 0}, + {"SectionHeader.NumberOfRelocations", Field, 
0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.PointerToLineNumbers", Field, 0}, + {"SectionHeader.PointerToRelocations", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.VirtualAddress", Field, 0}, + {"SectionHeader.VirtualSize", Field, 0}, + {"SectionHeader32", Type, 0}, + {"SectionHeader32.Characteristics", Field, 0}, + {"SectionHeader32.Name", Field, 0}, + {"SectionHeader32.NumberOfLineNumbers", Field, 0}, + {"SectionHeader32.NumberOfRelocations", Field, 0}, + {"SectionHeader32.PointerToLineNumbers", Field, 0}, + {"SectionHeader32.PointerToRawData", Field, 0}, + {"SectionHeader32.PointerToRelocations", Field, 0}, + {"SectionHeader32.SizeOfRawData", Field, 0}, + {"SectionHeader32.VirtualAddress", Field, 0}, + {"SectionHeader32.VirtualSize", Field, 0}, + {"StringTable", Type, 8}, + {"Symbol", Type, 1}, + {"Symbol.Name", Field, 1}, + {"Symbol.SectionNumber", Field, 1}, + {"Symbol.StorageClass", Field, 1}, + {"Symbol.Type", Field, 1}, + {"Symbol.Value", Field, 1}, + }, + "debug/plan9obj": { + {"(*File).Close", Method, 3}, + {"(*File).Section", Method, 3}, + {"(*File).Symbols", Method, 3}, + {"(*Section).Data", Method, 3}, + {"(*Section).Open", Method, 3}, + {"(Section).ReadAt", Method, 3}, + {"ErrNoSymbols", Var, 18}, + {"File", Type, 3}, + {"File.FileHeader", Field, 3}, + {"File.Sections", Field, 3}, + {"FileHeader", Type, 3}, + {"FileHeader.Bss", Field, 3}, + {"FileHeader.Entry", Field, 3}, + {"FileHeader.HdrSize", Field, 4}, + {"FileHeader.LoadAddress", Field, 4}, + {"FileHeader.Magic", Field, 3}, + {"FileHeader.PtrSize", Field, 3}, + {"Magic386", Const, 3}, + {"Magic64", Const, 3}, + {"MagicAMD64", Const, 3}, + {"MagicARM", Const, 3}, + {"NewFile", Func, 3}, + {"Open", Func, 3}, + {"Section", Type, 3}, + {"Section.ReaderAt", Field, 3}, + {"Section.SectionHeader", Field, 3}, + {"SectionHeader", Type, 3}, + {"SectionHeader.Name", Field, 3}, + {"SectionHeader.Offset", Field, 3}, + {"SectionHeader.Size", Field, 3}, + {"Sym", Type, 3}, + {"Sym.Name", Field, 3}, + {"Sym.Type", Field, 3}, + {"Sym.Value", Field, 3}, + }, + "embed": { + {"(FS).Open", Method, 16}, + {"(FS).ReadDir", Method, 16}, + {"(FS).ReadFile", Method, 16}, + {"FS", Type, 16}, + }, + "encoding": { + {"BinaryMarshaler", Type, 2}, + {"BinaryUnmarshaler", Type, 2}, + {"TextMarshaler", Type, 2}, + {"TextUnmarshaler", Type, 2}, + }, + "encoding/ascii85": { + {"(CorruptInputError).Error", Method, 0}, + {"CorruptInputError", Type, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"MaxEncodedLen", Func, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + }, + "encoding/asn1": { + {"(BitString).At", Method, 0}, + {"(BitString).RightAlign", Method, 0}, + {"(ObjectIdentifier).Equal", Method, 0}, + {"(ObjectIdentifier).String", Method, 3}, + {"(StructuralError).Error", Method, 0}, + {"(SyntaxError).Error", Method, 0}, + {"BitString", Type, 0}, + {"BitString.BitLength", Field, 0}, + {"BitString.Bytes", Field, 0}, + {"ClassApplication", Const, 6}, + {"ClassContextSpecific", Const, 6}, + {"ClassPrivate", Const, 6}, + {"ClassUniversal", Const, 6}, + {"Enumerated", Type, 0}, + {"Flag", Type, 0}, + {"Marshal", Func, 0}, + {"MarshalWithParams", Func, 10}, + {"NullBytes", Var, 9}, + {"NullRawValue", Var, 9}, + {"ObjectIdentifier", Type, 0}, + {"RawContent", Type, 0}, + {"RawValue", Type, 0}, + {"RawValue.Bytes", Field, 0}, + {"RawValue.Class", Field, 0}, + {"RawValue.FullBytes", Field, 0}, + {"RawValue.IsCompound", Field, 0}, + {"RawValue.Tag", Field, 0}, + {"StructuralError", Type, 0}, + 
{"StructuralError.Msg", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagBMPString", Const, 14}, + {"TagBitString", Const, 6}, + {"TagBoolean", Const, 6}, + {"TagEnum", Const, 6}, + {"TagGeneralString", Const, 6}, + {"TagGeneralizedTime", Const, 6}, + {"TagIA5String", Const, 6}, + {"TagInteger", Const, 6}, + {"TagNull", Const, 9}, + {"TagNumericString", Const, 10}, + {"TagOID", Const, 6}, + {"TagOctetString", Const, 6}, + {"TagPrintableString", Const, 6}, + {"TagSequence", Const, 6}, + {"TagSet", Const, 6}, + {"TagT61String", Const, 6}, + {"TagUTCTime", Const, 6}, + {"TagUTF8String", Const, 6}, + {"Unmarshal", Func, 0}, + {"UnmarshalWithParams", Func, 0}, + }, + "encoding/base32": { + {"(*Encoding).AppendDecode", Method, 22}, + {"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).WithPadding", Method, 9}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"HexEncoding", Var, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 9}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 9}, + }, + "encoding/base64": { + {"(*Encoding).AppendDecode", Method, 22}, + {"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).Strict", Method, 8}, + {"(Encoding).WithPadding", Method, 5}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 5}, + {"RawStdEncoding", Var, 5}, + {"RawURLEncoding", Var, 5}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 5}, + {"URLEncoding", Var, 0}, + }, + "encoding/binary": { + {"AppendByteOrder", Type, 19}, + {"AppendUvarint", Func, 19}, + {"AppendVarint", Func, 19}, + {"BigEndian", Var, 0}, + {"ByteOrder", Type, 0}, + {"LittleEndian", Var, 0}, + {"MaxVarintLen16", Const, 0}, + {"MaxVarintLen32", Const, 0}, + {"MaxVarintLen64", Const, 0}, + {"NativeEndian", Var, 21}, + {"PutUvarint", Func, 0}, + {"PutVarint", Func, 0}, + {"Read", Func, 0}, + {"ReadUvarint", Func, 0}, + {"ReadVarint", Func, 0}, + {"Size", Func, 0}, + {"Uvarint", Func, 0}, + {"Varint", Func, 0}, + {"Write", Func, 0}, + }, + "encoding/csv": { + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Unwrap", Method, 13}, + {"(*Reader).FieldPos", Method, 17}, + {"(*Reader).InputOffset", Method, 19}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAll", Method, 0}, + {"(*Writer).Error", Method, 1}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteAll", Method, 0}, + {"ErrBareQuote", Var, 0}, + {"ErrFieldCount", Var, 0}, + {"ErrQuote", Var, 0}, + {"ErrTrailingComma", Var, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Column", Field, 0}, + {"ParseError.Err", Field, 0}, + {"ParseError.Line", Field, 0}, + {"ParseError.StartLine", Field, 10}, + {"Reader", Type, 0}, + {"Reader.Comma", Field, 0}, + {"Reader.Comment", Field, 0}, + 
{"Reader.FieldsPerRecord", Field, 0}, + {"Reader.LazyQuotes", Field, 0}, + {"Reader.ReuseRecord", Field, 9}, + {"Reader.TrailingComma", Field, 0}, + {"Reader.TrimLeadingSpace", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Comma", Field, 0}, + {"Writer.UseCRLF", Field, 0}, + }, + "encoding/gob": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeValue", Method, 0}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeValue", Method, 0}, + {"CommonType", Type, 0}, + {"CommonType.Id", Field, 0}, + {"CommonType.Name", Field, 0}, + {"Decoder", Type, 0}, + {"Encoder", Type, 0}, + {"GobDecoder", Type, 0}, + {"GobEncoder", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + }, + "encoding/hex": { + {"(InvalidByteError).Error", Method, 0}, + {"AppendDecode", Func, 22}, + {"AppendEncode", Func, 22}, + {"Decode", Func, 0}, + {"DecodeString", Func, 0}, + {"DecodedLen", Func, 0}, + {"Dump", Func, 0}, + {"Dumper", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToString", Func, 0}, + {"EncodedLen", Func, 0}, + {"ErrLength", Var, 0}, + {"InvalidByteError", Type, 0}, + {"NewDecoder", Func, 10}, + {"NewEncoder", Func, 10}, + }, + "encoding/json": { + {"(*Decoder).Buffered", Method, 1}, + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DisallowUnknownFields", Method, 10}, + {"(*Decoder).InputOffset", Method, 14}, + {"(*Decoder).More", Method, 5}, + {"(*Decoder).Token", Method, 5}, + {"(*Decoder).UseNumber", Method, 1}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).SetEscapeHTML", Method, 7}, + {"(*Encoder).SetIndent", Method, 7}, + {"(*InvalidUTF8Error).Error", Method, 0}, + {"(*InvalidUnmarshalError).Error", Method, 0}, + {"(*MarshalerError).Error", Method, 0}, + {"(*MarshalerError).Unwrap", Method, 13}, + {"(*RawMessage).MarshalJSON", Method, 0}, + {"(*RawMessage).UnmarshalJSON", Method, 0}, + {"(*SyntaxError).Error", Method, 0}, + {"(*UnmarshalFieldError).Error", Method, 0}, + {"(*UnmarshalTypeError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(*UnsupportedValueError).Error", Method, 0}, + {"(Delim).String", Method, 5}, + {"(Number).Float64", Method, 1}, + {"(Number).Int64", Method, 1}, + {"(Number).String", Method, 1}, + {"(RawMessage).MarshalJSON", Method, 8}, + {"Compact", Func, 0}, + {"Decoder", Type, 0}, + {"Delim", Type, 5}, + {"Encoder", Type, 0}, + {"HTMLEscape", Func, 0}, + {"Indent", Func, 0}, + {"InvalidUTF8Error", Type, 0}, + {"InvalidUTF8Error.S", Field, 0}, + {"InvalidUnmarshalError", Type, 0}, + {"InvalidUnmarshalError.Type", Field, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 0}, + {"MarshalerError", Type, 0}, + {"MarshalerError.Err", Field, 0}, + {"MarshalerError.Type", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Number", Type, 1}, + {"RawMessage", Type, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Offset", Field, 0}, + {"Token", Type, 5}, + {"Unmarshal", Func, 0}, + {"UnmarshalFieldError", Type, 0}, + {"UnmarshalFieldError.Field", Field, 0}, + {"UnmarshalFieldError.Key", Field, 0}, + {"UnmarshalFieldError.Type", Field, 0}, + {"UnmarshalTypeError", Type, 0}, + {"UnmarshalTypeError.Field", Field, 8}, + {"UnmarshalTypeError.Offset", Field, 5}, + {"UnmarshalTypeError.Struct", Field, 8}, + {"UnmarshalTypeError.Type", Field, 0}, + {"UnmarshalTypeError.Value", Field, 0}, + {"Unmarshaler", Type, 0}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + {"UnsupportedValueError", Type, 0}, + 
{"UnsupportedValueError.Str", Field, 0}, + {"UnsupportedValueError.Value", Field, 0}, + {"Valid", Func, 9}, + }, + "encoding/pem": { + {"Block", Type, 0}, + {"Block.Bytes", Field, 0}, + {"Block.Headers", Field, 0}, + {"Block.Type", Field, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToMemory", Func, 0}, + }, + "encoding/xml": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeElement", Method, 0}, + {"(*Decoder).InputOffset", Method, 4}, + {"(*Decoder).InputPos", Method, 19}, + {"(*Decoder).RawToken", Method, 0}, + {"(*Decoder).Skip", Method, 0}, + {"(*Decoder).Token", Method, 0}, + {"(*Encoder).Close", Method, 20}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeElement", Method, 2}, + {"(*Encoder).EncodeToken", Method, 2}, + {"(*Encoder).Flush", Method, 2}, + {"(*Encoder).Indent", Method, 1}, + {"(*SyntaxError).Error", Method, 0}, + {"(*TagPathError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(CharData).Copy", Method, 0}, + {"(Comment).Copy", Method, 0}, + {"(Directive).Copy", Method, 0}, + {"(ProcInst).Copy", Method, 0}, + {"(StartElement).Copy", Method, 0}, + {"(StartElement).End", Method, 2}, + {"(UnmarshalError).Error", Method, 0}, + {"Attr", Type, 0}, + {"Attr.Name", Field, 0}, + {"Attr.Value", Field, 0}, + {"CharData", Type, 0}, + {"Comment", Type, 0}, + {"CopyToken", Func, 0}, + {"Decoder", Type, 0}, + {"Decoder.AutoClose", Field, 0}, + {"Decoder.CharsetReader", Field, 0}, + {"Decoder.DefaultSpace", Field, 1}, + {"Decoder.Entity", Field, 0}, + {"Decoder.Strict", Field, 0}, + {"Directive", Type, 0}, + {"Encoder", Type, 0}, + {"EndElement", Type, 0}, + {"EndElement.Name", Field, 0}, + {"Escape", Func, 0}, + {"EscapeText", Func, 1}, + {"HTMLAutoClose", Var, 0}, + {"HTMLEntity", Var, 0}, + {"Header", Const, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 2}, + {"MarshalerAttr", Type, 2}, + {"Name", Type, 0}, + {"Name.Local", Field, 0}, + {"Name.Space", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewTokenDecoder", Func, 10}, + {"ProcInst", Type, 0}, + {"ProcInst.Inst", Field, 0}, + {"ProcInst.Target", Field, 0}, + {"StartElement", Type, 0}, + {"StartElement.Attr", Field, 0}, + {"StartElement.Name", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Line", Field, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagPathError", Type, 0}, + {"TagPathError.Field1", Field, 0}, + {"TagPathError.Field2", Field, 0}, + {"TagPathError.Struct", Field, 0}, + {"TagPathError.Tag1", Field, 0}, + {"TagPathError.Tag2", Field, 0}, + {"Token", Type, 0}, + {"TokenReader", Type, 10}, + {"Unmarshal", Func, 0}, + {"UnmarshalError", Type, 0}, + {"Unmarshaler", Type, 2}, + {"UnmarshalerAttr", Type, 2}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + }, + "errors": { + {"As", Func, 13}, + {"ErrUnsupported", Var, 21}, + {"Is", Func, 13}, + {"Join", Func, 20}, + {"New", Func, 0}, + {"Unwrap", Func, 13}, + }, + "expvar": { + {"(*Float).Add", Method, 0}, + {"(*Float).Set", Method, 0}, + {"(*Float).String", Method, 0}, + {"(*Float).Value", Method, 8}, + {"(*Int).Add", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).String", Method, 0}, + {"(*Int).Value", Method, 8}, + {"(*Map).Add", Method, 0}, + {"(*Map).AddFloat", Method, 0}, + {"(*Map).Delete", Method, 12}, + {"(*Map).Do", Method, 0}, + {"(*Map).Get", Method, 0}, + {"(*Map).Init", Method, 0}, + {"(*Map).Set", Method, 0}, + {"(*Map).String", Method, 0}, + {"(*String).Set", Method, 0}, + {"(*String).String", Method, 
0}, + {"(*String).Value", Method, 8}, + {"(Func).String", Method, 0}, + {"(Func).Value", Method, 8}, + {"Do", Func, 0}, + {"Float", Type, 0}, + {"Func", Type, 0}, + {"Get", Func, 0}, + {"Handler", Func, 8}, + {"Int", Type, 0}, + {"KeyValue", Type, 0}, + {"KeyValue.Key", Field, 0}, + {"KeyValue.Value", Field, 0}, + {"Map", Type, 0}, + {"NewFloat", Func, 0}, + {"NewInt", Func, 0}, + {"NewMap", Func, 0}, + {"NewString", Func, 0}, + {"Publish", Func, 0}, + {"String", Type, 0}, + {"Var", Type, 0}, + }, + "flag": { + {"(*FlagSet).Arg", Method, 0}, + {"(*FlagSet).Args", Method, 0}, + {"(*FlagSet).Bool", Method, 0}, + {"(*FlagSet).BoolFunc", Method, 21}, + {"(*FlagSet).BoolVar", Method, 0}, + {"(*FlagSet).Duration", Method, 0}, + {"(*FlagSet).DurationVar", Method, 0}, + {"(*FlagSet).ErrorHandling", Method, 10}, + {"(*FlagSet).Float64", Method, 0}, + {"(*FlagSet).Float64Var", Method, 0}, + {"(*FlagSet).Func", Method, 16}, + {"(*FlagSet).Init", Method, 0}, + {"(*FlagSet).Int", Method, 0}, + {"(*FlagSet).Int64", Method, 0}, + {"(*FlagSet).Int64Var", Method, 0}, + {"(*FlagSet).IntVar", Method, 0}, + {"(*FlagSet).Lookup", Method, 0}, + {"(*FlagSet).NArg", Method, 0}, + {"(*FlagSet).NFlag", Method, 0}, + {"(*FlagSet).Name", Method, 10}, + {"(*FlagSet).Output", Method, 10}, + {"(*FlagSet).Parse", Method, 0}, + {"(*FlagSet).Parsed", Method, 0}, + {"(*FlagSet).PrintDefaults", Method, 0}, + {"(*FlagSet).Set", Method, 0}, + {"(*FlagSet).SetOutput", Method, 0}, + {"(*FlagSet).String", Method, 0}, + {"(*FlagSet).StringVar", Method, 0}, + {"(*FlagSet).TextVar", Method, 19}, + {"(*FlagSet).Uint", Method, 0}, + {"(*FlagSet).Uint64", Method, 0}, + {"(*FlagSet).Uint64Var", Method, 0}, + {"(*FlagSet).UintVar", Method, 0}, + {"(*FlagSet).Var", Method, 0}, + {"(*FlagSet).Visit", Method, 0}, + {"(*FlagSet).VisitAll", Method, 0}, + {"Arg", Func, 0}, + {"Args", Func, 0}, + {"Bool", Func, 0}, + {"BoolFunc", Func, 21}, + {"BoolVar", Func, 0}, + {"CommandLine", Var, 2}, + {"ContinueOnError", Const, 0}, + {"Duration", Func, 0}, + {"DurationVar", Func, 0}, + {"ErrHelp", Var, 0}, + {"ErrorHandling", Type, 0}, + {"ExitOnError", Const, 0}, + {"Flag", Type, 0}, + {"Flag.DefValue", Field, 0}, + {"Flag.Name", Field, 0}, + {"Flag.Usage", Field, 0}, + {"Flag.Value", Field, 0}, + {"FlagSet", Type, 0}, + {"FlagSet.Usage", Field, 0}, + {"Float64", Func, 0}, + {"Float64Var", Func, 0}, + {"Func", Func, 16}, + {"Getter", Type, 2}, + {"Int", Func, 0}, + {"Int64", Func, 0}, + {"Int64Var", Func, 0}, + {"IntVar", Func, 0}, + {"Lookup", Func, 0}, + {"NArg", Func, 0}, + {"NFlag", Func, 0}, + {"NewFlagSet", Func, 0}, + {"PanicOnError", Const, 0}, + {"Parse", Func, 0}, + {"Parsed", Func, 0}, + {"PrintDefaults", Func, 0}, + {"Set", Func, 0}, + {"String", Func, 0}, + {"StringVar", Func, 0}, + {"TextVar", Func, 19}, + {"Uint", Func, 0}, + {"Uint64", Func, 0}, + {"Uint64Var", Func, 0}, + {"UintVar", Func, 0}, + {"UnquoteUsage", Func, 5}, + {"Usage", Var, 0}, + {"Value", Type, 0}, + {"Var", Func, 0}, + {"Visit", Func, 0}, + {"VisitAll", Func, 0}, + }, + "fmt": { + {"Append", Func, 19}, + {"Appendf", Func, 19}, + {"Appendln", Func, 19}, + {"Errorf", Func, 0}, + {"FormatString", Func, 20}, + {"Formatter", Type, 0}, + {"Fprint", Func, 0}, + {"Fprintf", Func, 0}, + {"Fprintln", Func, 0}, + {"Fscan", Func, 0}, + {"Fscanf", Func, 0}, + {"Fscanln", Func, 0}, + {"GoStringer", Type, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"Scan", Func, 0}, + {"ScanState", Type, 0}, + {"Scanf", Func, 0}, + {"Scanln", Func, 0}, + {"Scanner", 
Type, 0}, + {"Sprint", Func, 0}, + {"Sprintf", Func, 0}, + {"Sprintln", Func, 0}, + {"Sscan", Func, 0}, + {"Sscanf", Func, 0}, + {"Sscanln", Func, 0}, + {"State", Type, 0}, + {"Stringer", Type, 0}, + }, + "go/ast": { + {"(*ArrayType).End", Method, 0}, + {"(*ArrayType).Pos", Method, 0}, + {"(*AssignStmt).End", Method, 0}, + {"(*AssignStmt).Pos", Method, 0}, + {"(*BadDecl).End", Method, 0}, + {"(*BadDecl).Pos", Method, 0}, + {"(*BadExpr).End", Method, 0}, + {"(*BadExpr).Pos", Method, 0}, + {"(*BadStmt).End", Method, 0}, + {"(*BadStmt).Pos", Method, 0}, + {"(*BasicLit).End", Method, 0}, + {"(*BasicLit).Pos", Method, 0}, + {"(*BinaryExpr).End", Method, 0}, + {"(*BinaryExpr).Pos", Method, 0}, + {"(*BlockStmt).End", Method, 0}, + {"(*BlockStmt).Pos", Method, 0}, + {"(*BranchStmt).End", Method, 0}, + {"(*BranchStmt).Pos", Method, 0}, + {"(*CallExpr).End", Method, 0}, + {"(*CallExpr).Pos", Method, 0}, + {"(*CaseClause).End", Method, 0}, + {"(*CaseClause).Pos", Method, 0}, + {"(*ChanType).End", Method, 0}, + {"(*ChanType).Pos", Method, 0}, + {"(*CommClause).End", Method, 0}, + {"(*CommClause).Pos", Method, 0}, + {"(*Comment).End", Method, 0}, + {"(*Comment).Pos", Method, 0}, + {"(*CommentGroup).End", Method, 0}, + {"(*CommentGroup).Pos", Method, 0}, + {"(*CommentGroup).Text", Method, 0}, + {"(*CompositeLit).End", Method, 0}, + {"(*CompositeLit).Pos", Method, 0}, + {"(*DeclStmt).End", Method, 0}, + {"(*DeclStmt).Pos", Method, 0}, + {"(*DeferStmt).End", Method, 0}, + {"(*DeferStmt).Pos", Method, 0}, + {"(*Ellipsis).End", Method, 0}, + {"(*Ellipsis).Pos", Method, 0}, + {"(*EmptyStmt).End", Method, 0}, + {"(*EmptyStmt).Pos", Method, 0}, + {"(*ExprStmt).End", Method, 0}, + {"(*ExprStmt).Pos", Method, 0}, + {"(*Field).End", Method, 0}, + {"(*Field).Pos", Method, 0}, + {"(*FieldList).End", Method, 0}, + {"(*FieldList).NumFields", Method, 0}, + {"(*FieldList).Pos", Method, 0}, + {"(*File).End", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*ForStmt).End", Method, 0}, + {"(*ForStmt).Pos", Method, 0}, + {"(*FuncDecl).End", Method, 0}, + {"(*FuncDecl).Pos", Method, 0}, + {"(*FuncLit).End", Method, 0}, + {"(*FuncLit).Pos", Method, 0}, + {"(*FuncType).End", Method, 0}, + {"(*FuncType).Pos", Method, 0}, + {"(*GenDecl).End", Method, 0}, + {"(*GenDecl).Pos", Method, 0}, + {"(*GoStmt).End", Method, 0}, + {"(*GoStmt).Pos", Method, 0}, + {"(*Ident).End", Method, 0}, + {"(*Ident).IsExported", Method, 0}, + {"(*Ident).Pos", Method, 0}, + {"(*Ident).String", Method, 0}, + {"(*IfStmt).End", Method, 0}, + {"(*IfStmt).Pos", Method, 0}, + {"(*ImportSpec).End", Method, 0}, + {"(*ImportSpec).Pos", Method, 0}, + {"(*IncDecStmt).End", Method, 0}, + {"(*IncDecStmt).Pos", Method, 0}, + {"(*IndexExpr).End", Method, 0}, + {"(*IndexExpr).Pos", Method, 0}, + {"(*IndexListExpr).End", Method, 18}, + {"(*IndexListExpr).Pos", Method, 18}, + {"(*InterfaceType).End", Method, 0}, + {"(*InterfaceType).Pos", Method, 0}, + {"(*KeyValueExpr).End", Method, 0}, + {"(*KeyValueExpr).Pos", Method, 0}, + {"(*LabeledStmt).End", Method, 0}, + {"(*LabeledStmt).Pos", Method, 0}, + {"(*MapType).End", Method, 0}, + {"(*MapType).Pos", Method, 0}, + {"(*Object).Pos", Method, 0}, + {"(*Package).End", Method, 0}, + {"(*Package).Pos", Method, 0}, + {"(*ParenExpr).End", Method, 0}, + {"(*ParenExpr).Pos", Method, 0}, + {"(*RangeStmt).End", Method, 0}, + {"(*RangeStmt).Pos", Method, 0}, + {"(*ReturnStmt).End", Method, 0}, + {"(*ReturnStmt).Pos", Method, 0}, + {"(*Scope).Insert", Method, 0}, + {"(*Scope).Lookup", Method, 0}, + {"(*Scope).String", Method, 0}, + 
{"(*SelectStmt).End", Method, 0}, + {"(*SelectStmt).Pos", Method, 0}, + {"(*SelectorExpr).End", Method, 0}, + {"(*SelectorExpr).Pos", Method, 0}, + {"(*SendStmt).End", Method, 0}, + {"(*SendStmt).Pos", Method, 0}, + {"(*SliceExpr).End", Method, 0}, + {"(*SliceExpr).Pos", Method, 0}, + {"(*StarExpr).End", Method, 0}, + {"(*StarExpr).Pos", Method, 0}, + {"(*StructType).End", Method, 0}, + {"(*StructType).Pos", Method, 0}, + {"(*SwitchStmt).End", Method, 0}, + {"(*SwitchStmt).Pos", Method, 0}, + {"(*TypeAssertExpr).End", Method, 0}, + {"(*TypeAssertExpr).Pos", Method, 0}, + {"(*TypeSpec).End", Method, 0}, + {"(*TypeSpec).Pos", Method, 0}, + {"(*TypeSwitchStmt).End", Method, 0}, + {"(*TypeSwitchStmt).Pos", Method, 0}, + {"(*UnaryExpr).End", Method, 0}, + {"(*UnaryExpr).Pos", Method, 0}, + {"(*ValueSpec).End", Method, 0}, + {"(*ValueSpec).Pos", Method, 0}, + {"(CommentMap).Comments", Method, 1}, + {"(CommentMap).Filter", Method, 1}, + {"(CommentMap).String", Method, 1}, + {"(CommentMap).Update", Method, 1}, + {"(ObjKind).String", Method, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.Elt", Field, 0}, + {"ArrayType.Lbrack", Field, 0}, + {"ArrayType.Len", Field, 0}, + {"AssignStmt", Type, 0}, + {"AssignStmt.Lhs", Field, 0}, + {"AssignStmt.Rhs", Field, 0}, + {"AssignStmt.Tok", Field, 0}, + {"AssignStmt.TokPos", Field, 0}, + {"Bad", Const, 0}, + {"BadDecl", Type, 0}, + {"BadDecl.From", Field, 0}, + {"BadDecl.To", Field, 0}, + {"BadExpr", Type, 0}, + {"BadExpr.From", Field, 0}, + {"BadExpr.To", Field, 0}, + {"BadStmt", Type, 0}, + {"BadStmt.From", Field, 0}, + {"BadStmt.To", Field, 0}, + {"BasicLit", Type, 0}, + {"BasicLit.Kind", Field, 0}, + {"BasicLit.Value", Field, 0}, + {"BasicLit.ValuePos", Field, 0}, + {"BinaryExpr", Type, 0}, + {"BinaryExpr.Op", Field, 0}, + {"BinaryExpr.OpPos", Field, 0}, + {"BinaryExpr.X", Field, 0}, + {"BinaryExpr.Y", Field, 0}, + {"BlockStmt", Type, 0}, + {"BlockStmt.Lbrace", Field, 0}, + {"BlockStmt.List", Field, 0}, + {"BlockStmt.Rbrace", Field, 0}, + {"BranchStmt", Type, 0}, + {"BranchStmt.Label", Field, 0}, + {"BranchStmt.Tok", Field, 0}, + {"BranchStmt.TokPos", Field, 0}, + {"CallExpr", Type, 0}, + {"CallExpr.Args", Field, 0}, + {"CallExpr.Ellipsis", Field, 0}, + {"CallExpr.Fun", Field, 0}, + {"CallExpr.Lparen", Field, 0}, + {"CallExpr.Rparen", Field, 0}, + {"CaseClause", Type, 0}, + {"CaseClause.Body", Field, 0}, + {"CaseClause.Case", Field, 0}, + {"CaseClause.Colon", Field, 0}, + {"CaseClause.List", Field, 0}, + {"ChanDir", Type, 0}, + {"ChanType", Type, 0}, + {"ChanType.Arrow", Field, 1}, + {"ChanType.Begin", Field, 0}, + {"ChanType.Dir", Field, 0}, + {"ChanType.Value", Field, 0}, + {"CommClause", Type, 0}, + {"CommClause.Body", Field, 0}, + {"CommClause.Case", Field, 0}, + {"CommClause.Colon", Field, 0}, + {"CommClause.Comm", Field, 0}, + {"Comment", Type, 0}, + {"Comment.Slash", Field, 0}, + {"Comment.Text", Field, 0}, + {"CommentGroup", Type, 0}, + {"CommentGroup.List", Field, 0}, + {"CommentMap", Type, 1}, + {"CompositeLit", Type, 0}, + {"CompositeLit.Elts", Field, 0}, + {"CompositeLit.Incomplete", Field, 11}, + {"CompositeLit.Lbrace", Field, 0}, + {"CompositeLit.Rbrace", Field, 0}, + {"CompositeLit.Type", Field, 0}, + {"Con", Const, 0}, + {"Decl", Type, 0}, + {"DeclStmt", Type, 0}, + {"DeclStmt.Decl", Field, 0}, + {"DeferStmt", Type, 0}, + {"DeferStmt.Call", Field, 0}, + {"DeferStmt.Defer", Field, 0}, + {"Ellipsis", Type, 0}, + {"Ellipsis.Ellipsis", Field, 0}, + {"Ellipsis.Elt", Field, 0}, + {"EmptyStmt", Type, 0}, + {"EmptyStmt.Implicit", Field, 5}, + 
{"EmptyStmt.Semicolon", Field, 0}, + {"Expr", Type, 0}, + {"ExprStmt", Type, 0}, + {"ExprStmt.X", Field, 0}, + {"Field", Type, 0}, + {"Field.Comment", Field, 0}, + {"Field.Doc", Field, 0}, + {"Field.Names", Field, 0}, + {"Field.Tag", Field, 0}, + {"Field.Type", Field, 0}, + {"FieldFilter", Type, 0}, + {"FieldList", Type, 0}, + {"FieldList.Closing", Field, 0}, + {"FieldList.List", Field, 0}, + {"FieldList.Opening", Field, 0}, + {"File", Type, 0}, + {"File.Comments", Field, 0}, + {"File.Decls", Field, 0}, + {"File.Doc", Field, 0}, + {"File.FileEnd", Field, 20}, + {"File.FileStart", Field, 20}, + {"File.GoVersion", Field, 21}, + {"File.Imports", Field, 0}, + {"File.Name", Field, 0}, + {"File.Package", Field, 0}, + {"File.Scope", Field, 0}, + {"File.Unresolved", Field, 0}, + {"FileExports", Func, 0}, + {"Filter", Type, 0}, + {"FilterDecl", Func, 0}, + {"FilterFile", Func, 0}, + {"FilterFuncDuplicates", Const, 0}, + {"FilterImportDuplicates", Const, 0}, + {"FilterPackage", Func, 0}, + {"FilterUnassociatedComments", Const, 0}, + {"ForStmt", Type, 0}, + {"ForStmt.Body", Field, 0}, + {"ForStmt.Cond", Field, 0}, + {"ForStmt.For", Field, 0}, + {"ForStmt.Init", Field, 0}, + {"ForStmt.Post", Field, 0}, + {"Fprint", Func, 0}, + {"Fun", Const, 0}, + {"FuncDecl", Type, 0}, + {"FuncDecl.Body", Field, 0}, + {"FuncDecl.Doc", Field, 0}, + {"FuncDecl.Name", Field, 0}, + {"FuncDecl.Recv", Field, 0}, + {"FuncDecl.Type", Field, 0}, + {"FuncLit", Type, 0}, + {"FuncLit.Body", Field, 0}, + {"FuncLit.Type", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.Func", Field, 0}, + {"FuncType.Params", Field, 0}, + {"FuncType.Results", Field, 0}, + {"FuncType.TypeParams", Field, 18}, + {"GenDecl", Type, 0}, + {"GenDecl.Doc", Field, 0}, + {"GenDecl.Lparen", Field, 0}, + {"GenDecl.Rparen", Field, 0}, + {"GenDecl.Specs", Field, 0}, + {"GenDecl.Tok", Field, 0}, + {"GenDecl.TokPos", Field, 0}, + {"GoStmt", Type, 0}, + {"GoStmt.Call", Field, 0}, + {"GoStmt.Go", Field, 0}, + {"Ident", Type, 0}, + {"Ident.Name", Field, 0}, + {"Ident.NamePos", Field, 0}, + {"Ident.Obj", Field, 0}, + {"IfStmt", Type, 0}, + {"IfStmt.Body", Field, 0}, + {"IfStmt.Cond", Field, 0}, + {"IfStmt.Else", Field, 0}, + {"IfStmt.If", Field, 0}, + {"IfStmt.Init", Field, 0}, + {"ImportSpec", Type, 0}, + {"ImportSpec.Comment", Field, 0}, + {"ImportSpec.Doc", Field, 0}, + {"ImportSpec.EndPos", Field, 0}, + {"ImportSpec.Name", Field, 0}, + {"ImportSpec.Path", Field, 0}, + {"Importer", Type, 0}, + {"IncDecStmt", Type, 0}, + {"IncDecStmt.Tok", Field, 0}, + {"IncDecStmt.TokPos", Field, 0}, + {"IncDecStmt.X", Field, 0}, + {"IndexExpr", Type, 0}, + {"IndexExpr.Index", Field, 0}, + {"IndexExpr.Lbrack", Field, 0}, + {"IndexExpr.Rbrack", Field, 0}, + {"IndexExpr.X", Field, 0}, + {"IndexListExpr", Type, 18}, + {"IndexListExpr.Indices", Field, 18}, + {"IndexListExpr.Lbrack", Field, 18}, + {"IndexListExpr.Rbrack", Field, 18}, + {"IndexListExpr.X", Field, 18}, + {"Inspect", Func, 0}, + {"InterfaceType", Type, 0}, + {"InterfaceType.Incomplete", Field, 0}, + {"InterfaceType.Interface", Field, 0}, + {"InterfaceType.Methods", Field, 0}, + {"IsExported", Func, 0}, + {"IsGenerated", Func, 21}, + {"KeyValueExpr", Type, 0}, + {"KeyValueExpr.Colon", Field, 0}, + {"KeyValueExpr.Key", Field, 0}, + {"KeyValueExpr.Value", Field, 0}, + {"LabeledStmt", Type, 0}, + {"LabeledStmt.Colon", Field, 0}, + {"LabeledStmt.Label", Field, 0}, + {"LabeledStmt.Stmt", Field, 0}, + {"Lbl", Const, 0}, + {"MapType", Type, 0}, + {"MapType.Key", Field, 0}, + {"MapType.Map", Field, 0}, + {"MapType.Value", Field, 
0}, + {"MergeMode", Type, 0}, + {"MergePackageFiles", Func, 0}, + {"NewCommentMap", Func, 1}, + {"NewIdent", Func, 0}, + {"NewObj", Func, 0}, + {"NewPackage", Func, 0}, + {"NewScope", Func, 0}, + {"Node", Type, 0}, + {"NotNilFilter", Func, 0}, + {"ObjKind", Type, 0}, + {"Object", Type, 0}, + {"Object.Data", Field, 0}, + {"Object.Decl", Field, 0}, + {"Object.Kind", Field, 0}, + {"Object.Name", Field, 0}, + {"Object.Type", Field, 0}, + {"Package", Type, 0}, + {"Package.Files", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Scope", Field, 0}, + {"PackageExports", Func, 0}, + {"ParenExpr", Type, 0}, + {"ParenExpr.Lparen", Field, 0}, + {"ParenExpr.Rparen", Field, 0}, + {"ParenExpr.X", Field, 0}, + {"Pkg", Const, 0}, + {"Print", Func, 0}, + {"RECV", Const, 0}, + {"RangeStmt", Type, 0}, + {"RangeStmt.Body", Field, 0}, + {"RangeStmt.For", Field, 0}, + {"RangeStmt.Key", Field, 0}, + {"RangeStmt.Range", Field, 20}, + {"RangeStmt.Tok", Field, 0}, + {"RangeStmt.TokPos", Field, 0}, + {"RangeStmt.Value", Field, 0}, + {"RangeStmt.X", Field, 0}, + {"ReturnStmt", Type, 0}, + {"ReturnStmt.Results", Field, 0}, + {"ReturnStmt.Return", Field, 0}, + {"SEND", Const, 0}, + {"Scope", Type, 0}, + {"Scope.Objects", Field, 0}, + {"Scope.Outer", Field, 0}, + {"SelectStmt", Type, 0}, + {"SelectStmt.Body", Field, 0}, + {"SelectStmt.Select", Field, 0}, + {"SelectorExpr", Type, 0}, + {"SelectorExpr.Sel", Field, 0}, + {"SelectorExpr.X", Field, 0}, + {"SendStmt", Type, 0}, + {"SendStmt.Arrow", Field, 0}, + {"SendStmt.Chan", Field, 0}, + {"SendStmt.Value", Field, 0}, + {"SliceExpr", Type, 0}, + {"SliceExpr.High", Field, 0}, + {"SliceExpr.Lbrack", Field, 0}, + {"SliceExpr.Low", Field, 0}, + {"SliceExpr.Max", Field, 2}, + {"SliceExpr.Rbrack", Field, 0}, + {"SliceExpr.Slice3", Field, 2}, + {"SliceExpr.X", Field, 0}, + {"SortImports", Func, 0}, + {"Spec", Type, 0}, + {"StarExpr", Type, 0}, + {"StarExpr.Star", Field, 0}, + {"StarExpr.X", Field, 0}, + {"Stmt", Type, 0}, + {"StructType", Type, 0}, + {"StructType.Fields", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Struct", Field, 0}, + {"SwitchStmt", Type, 0}, + {"SwitchStmt.Body", Field, 0}, + {"SwitchStmt.Init", Field, 0}, + {"SwitchStmt.Switch", Field, 0}, + {"SwitchStmt.Tag", Field, 0}, + {"Typ", Const, 0}, + {"TypeAssertExpr", Type, 0}, + {"TypeAssertExpr.Lparen", Field, 2}, + {"TypeAssertExpr.Rparen", Field, 2}, + {"TypeAssertExpr.Type", Field, 0}, + {"TypeAssertExpr.X", Field, 0}, + {"TypeSpec", Type, 0}, + {"TypeSpec.Assign", Field, 9}, + {"TypeSpec.Comment", Field, 0}, + {"TypeSpec.Doc", Field, 0}, + {"TypeSpec.Name", Field, 0}, + {"TypeSpec.Type", Field, 0}, + {"TypeSpec.TypeParams", Field, 18}, + {"TypeSwitchStmt", Type, 0}, + {"TypeSwitchStmt.Assign", Field, 0}, + {"TypeSwitchStmt.Body", Field, 0}, + {"TypeSwitchStmt.Init", Field, 0}, + {"TypeSwitchStmt.Switch", Field, 0}, + {"UnaryExpr", Type, 0}, + {"UnaryExpr.Op", Field, 0}, + {"UnaryExpr.OpPos", Field, 0}, + {"UnaryExpr.X", Field, 0}, + {"Unparen", Func, 22}, + {"ValueSpec", Type, 0}, + {"ValueSpec.Comment", Field, 0}, + {"ValueSpec.Doc", Field, 0}, + {"ValueSpec.Names", Field, 0}, + {"ValueSpec.Type", Field, 0}, + {"ValueSpec.Values", Field, 0}, + {"Var", Const, 0}, + {"Visitor", Type, 0}, + {"Walk", Func, 0}, + }, + "go/build": { + {"(*Context).Import", Method, 0}, + {"(*Context).ImportDir", Method, 0}, + {"(*Context).MatchFile", Method, 2}, + {"(*Context).SrcDirs", Method, 0}, + {"(*MultiplePackageError).Error", Method, 4}, + {"(*NoGoError).Error", 
Method, 0}, + {"(*Package).IsCommand", Method, 0}, + {"AllowBinary", Const, 0}, + {"ArchChar", Func, 0}, + {"Context", Type, 0}, + {"Context.BuildTags", Field, 0}, + {"Context.CgoEnabled", Field, 0}, + {"Context.Compiler", Field, 0}, + {"Context.Dir", Field, 14}, + {"Context.GOARCH", Field, 0}, + {"Context.GOOS", Field, 0}, + {"Context.GOPATH", Field, 0}, + {"Context.GOROOT", Field, 0}, + {"Context.HasSubdir", Field, 0}, + {"Context.InstallSuffix", Field, 1}, + {"Context.IsAbsPath", Field, 0}, + {"Context.IsDir", Field, 0}, + {"Context.JoinPath", Field, 0}, + {"Context.OpenFile", Field, 0}, + {"Context.ReadDir", Field, 0}, + {"Context.ReleaseTags", Field, 1}, + {"Context.SplitPathList", Field, 0}, + {"Context.ToolTags", Field, 17}, + {"Context.UseAllFiles", Field, 0}, + {"Default", Var, 0}, + {"Directive", Type, 21}, + {"Directive.Pos", Field, 21}, + {"Directive.Text", Field, 21}, + {"FindOnly", Const, 0}, + {"IgnoreVendor", Const, 6}, + {"Import", Func, 0}, + {"ImportComment", Const, 4}, + {"ImportDir", Func, 0}, + {"ImportMode", Type, 0}, + {"IsLocalImport", Func, 0}, + {"MultiplePackageError", Type, 4}, + {"MultiplePackageError.Dir", Field, 4}, + {"MultiplePackageError.Files", Field, 4}, + {"MultiplePackageError.Packages", Field, 4}, + {"NoGoError", Type, 0}, + {"NoGoError.Dir", Field, 0}, + {"Package", Type, 0}, + {"Package.AllTags", Field, 2}, + {"Package.BinDir", Field, 0}, + {"Package.BinaryOnly", Field, 7}, + {"Package.CFiles", Field, 0}, + {"Package.CXXFiles", Field, 2}, + {"Package.CgoCFLAGS", Field, 0}, + {"Package.CgoCPPFLAGS", Field, 2}, + {"Package.CgoCXXFLAGS", Field, 2}, + {"Package.CgoFFLAGS", Field, 7}, + {"Package.CgoFiles", Field, 0}, + {"Package.CgoLDFLAGS", Field, 0}, + {"Package.CgoPkgConfig", Field, 0}, + {"Package.ConflictDir", Field, 2}, + {"Package.Dir", Field, 0}, + {"Package.Directives", Field, 21}, + {"Package.Doc", Field, 0}, + {"Package.EmbedPatternPos", Field, 16}, + {"Package.EmbedPatterns", Field, 16}, + {"Package.FFiles", Field, 7}, + {"Package.GoFiles", Field, 0}, + {"Package.Goroot", Field, 0}, + {"Package.HFiles", Field, 0}, + {"Package.IgnoredGoFiles", Field, 1}, + {"Package.IgnoredOtherFiles", Field, 16}, + {"Package.ImportComment", Field, 4}, + {"Package.ImportPath", Field, 0}, + {"Package.ImportPos", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.InvalidGoFiles", Field, 6}, + {"Package.MFiles", Field, 3}, + {"Package.Name", Field, 0}, + {"Package.PkgObj", Field, 0}, + {"Package.PkgRoot", Field, 0}, + {"Package.PkgTargetRoot", Field, 5}, + {"Package.Root", Field, 0}, + {"Package.SFiles", Field, 0}, + {"Package.SrcRoot", Field, 0}, + {"Package.SwigCXXFiles", Field, 1}, + {"Package.SwigFiles", Field, 1}, + {"Package.SysoFiles", Field, 0}, + {"Package.TestDirectives", Field, 21}, + {"Package.TestEmbedPatternPos", Field, 16}, + {"Package.TestEmbedPatterns", Field, 16}, + {"Package.TestGoFiles", Field, 0}, + {"Package.TestImportPos", Field, 0}, + {"Package.TestImports", Field, 0}, + {"Package.XTestDirectives", Field, 21}, + {"Package.XTestEmbedPatternPos", Field, 16}, + {"Package.XTestEmbedPatterns", Field, 16}, + {"Package.XTestGoFiles", Field, 0}, + {"Package.XTestImportPos", Field, 0}, + {"Package.XTestImports", Field, 0}, + {"ToolDir", Var, 0}, + }, + "go/build/constraint": { + {"(*AndExpr).Eval", Method, 16}, + {"(*AndExpr).String", Method, 16}, + {"(*NotExpr).Eval", Method, 16}, + {"(*NotExpr).String", Method, 16}, + {"(*OrExpr).Eval", Method, 16}, + {"(*OrExpr).String", Method, 16}, + {"(*SyntaxError).Error", Method, 16}, + 
{"(*TagExpr).Eval", Method, 16}, + {"(*TagExpr).String", Method, 16}, + {"AndExpr", Type, 16}, + {"AndExpr.X", Field, 16}, + {"AndExpr.Y", Field, 16}, + {"Expr", Type, 16}, + {"GoVersion", Func, 21}, + {"IsGoBuild", Func, 16}, + {"IsPlusBuild", Func, 16}, + {"NotExpr", Type, 16}, + {"NotExpr.X", Field, 16}, + {"OrExpr", Type, 16}, + {"OrExpr.X", Field, 16}, + {"OrExpr.Y", Field, 16}, + {"Parse", Func, 16}, + {"PlusBuildLines", Func, 16}, + {"SyntaxError", Type, 16}, + {"SyntaxError.Err", Field, 16}, + {"SyntaxError.Offset", Field, 16}, + {"TagExpr", Type, 16}, + {"TagExpr.Tag", Field, 16}, + }, + "go/constant": { + {"(Kind).String", Method, 18}, + {"BinaryOp", Func, 5}, + {"BitLen", Func, 5}, + {"Bool", Const, 5}, + {"BoolVal", Func, 5}, + {"Bytes", Func, 5}, + {"Compare", Func, 5}, + {"Complex", Const, 5}, + {"Denom", Func, 5}, + {"Float", Const, 5}, + {"Float32Val", Func, 5}, + {"Float64Val", Func, 5}, + {"Imag", Func, 5}, + {"Int", Const, 5}, + {"Int64Val", Func, 5}, + {"Kind", Type, 5}, + {"Make", Func, 13}, + {"MakeBool", Func, 5}, + {"MakeFloat64", Func, 5}, + {"MakeFromBytes", Func, 5}, + {"MakeFromLiteral", Func, 5}, + {"MakeImag", Func, 5}, + {"MakeInt64", Func, 5}, + {"MakeString", Func, 5}, + {"MakeUint64", Func, 5}, + {"MakeUnknown", Func, 5}, + {"Num", Func, 5}, + {"Real", Func, 5}, + {"Shift", Func, 5}, + {"Sign", Func, 5}, + {"String", Const, 5}, + {"StringVal", Func, 5}, + {"ToComplex", Func, 6}, + {"ToFloat", Func, 6}, + {"ToInt", Func, 6}, + {"Uint64Val", Func, 5}, + {"UnaryOp", Func, 5}, + {"Unknown", Const, 5}, + {"Val", Func, 13}, + {"Value", Type, 5}, + }, + "go/doc": { + {"(*Package).Filter", Method, 0}, + {"(*Package).HTML", Method, 19}, + {"(*Package).Markdown", Method, 19}, + {"(*Package).Parser", Method, 19}, + {"(*Package).Printer", Method, 19}, + {"(*Package).Synopsis", Method, 19}, + {"(*Package).Text", Method, 19}, + {"AllDecls", Const, 0}, + {"AllMethods", Const, 0}, + {"Example", Type, 0}, + {"Example.Code", Field, 0}, + {"Example.Comments", Field, 0}, + {"Example.Doc", Field, 0}, + {"Example.EmptyOutput", Field, 1}, + {"Example.Name", Field, 0}, + {"Example.Order", Field, 1}, + {"Example.Output", Field, 0}, + {"Example.Play", Field, 1}, + {"Example.Suffix", Field, 14}, + {"Example.Unordered", Field, 7}, + {"Examples", Func, 0}, + {"Filter", Type, 0}, + {"Func", Type, 0}, + {"Func.Decl", Field, 0}, + {"Func.Doc", Field, 0}, + {"Func.Examples", Field, 14}, + {"Func.Level", Field, 0}, + {"Func.Name", Field, 0}, + {"Func.Orig", Field, 0}, + {"Func.Recv", Field, 0}, + {"IllegalPrefixes", Var, 1}, + {"IsPredeclared", Func, 8}, + {"Mode", Type, 0}, + {"New", Func, 0}, + {"NewFromFiles", Func, 14}, + {"Note", Type, 1}, + {"Note.Body", Field, 1}, + {"Note.End", Field, 1}, + {"Note.Pos", Field, 1}, + {"Note.UID", Field, 1}, + {"Package", Type, 0}, + {"Package.Bugs", Field, 0}, + {"Package.Consts", Field, 0}, + {"Package.Doc", Field, 0}, + {"Package.Examples", Field, 14}, + {"Package.Filenames", Field, 0}, + {"Package.Funcs", Field, 0}, + {"Package.ImportPath", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Notes", Field, 1}, + {"Package.Types", Field, 0}, + {"Package.Vars", Field, 0}, + {"PreserveAST", Const, 12}, + {"Synopsis", Func, 0}, + {"ToHTML", Func, 0}, + {"ToText", Func, 0}, + {"Type", Type, 0}, + {"Type.Consts", Field, 0}, + {"Type.Decl", Field, 0}, + {"Type.Doc", Field, 0}, + {"Type.Examples", Field, 14}, + {"Type.Funcs", Field, 0}, + {"Type.Methods", Field, 0}, + {"Type.Name", Field, 0}, + {"Type.Vars", Field, 0}, + 
{"Value", Type, 0}, + {"Value.Decl", Field, 0}, + {"Value.Doc", Field, 0}, + {"Value.Names", Field, 0}, + }, + "go/doc/comment": { + {"(*DocLink).DefaultURL", Method, 19}, + {"(*Heading).DefaultID", Method, 19}, + {"(*List).BlankBefore", Method, 19}, + {"(*List).BlankBetween", Method, 19}, + {"(*Parser).Parse", Method, 19}, + {"(*Printer).Comment", Method, 19}, + {"(*Printer).HTML", Method, 19}, + {"(*Printer).Markdown", Method, 19}, + {"(*Printer).Text", Method, 19}, + {"Block", Type, 19}, + {"Code", Type, 19}, + {"Code.Text", Field, 19}, + {"DefaultLookupPackage", Func, 19}, + {"Doc", Type, 19}, + {"Doc.Content", Field, 19}, + {"Doc.Links", Field, 19}, + {"DocLink", Type, 19}, + {"DocLink.ImportPath", Field, 19}, + {"DocLink.Name", Field, 19}, + {"DocLink.Recv", Field, 19}, + {"DocLink.Text", Field, 19}, + {"Heading", Type, 19}, + {"Heading.Text", Field, 19}, + {"Italic", Type, 19}, + {"Link", Type, 19}, + {"Link.Auto", Field, 19}, + {"Link.Text", Field, 19}, + {"Link.URL", Field, 19}, + {"LinkDef", Type, 19}, + {"LinkDef.Text", Field, 19}, + {"LinkDef.URL", Field, 19}, + {"LinkDef.Used", Field, 19}, + {"List", Type, 19}, + {"List.ForceBlankBefore", Field, 19}, + {"List.ForceBlankBetween", Field, 19}, + {"List.Items", Field, 19}, + {"ListItem", Type, 19}, + {"ListItem.Content", Field, 19}, + {"ListItem.Number", Field, 19}, + {"Paragraph", Type, 19}, + {"Paragraph.Text", Field, 19}, + {"Parser", Type, 19}, + {"Parser.LookupPackage", Field, 19}, + {"Parser.LookupSym", Field, 19}, + {"Parser.Words", Field, 19}, + {"Plain", Type, 19}, + {"Printer", Type, 19}, + {"Printer.DocLinkBaseURL", Field, 19}, + {"Printer.DocLinkURL", Field, 19}, + {"Printer.HeadingID", Field, 19}, + {"Printer.HeadingLevel", Field, 19}, + {"Printer.TextCodePrefix", Field, 19}, + {"Printer.TextPrefix", Field, 19}, + {"Printer.TextWidth", Field, 19}, + {"Text", Type, 19}, + }, + "go/format": { + {"Node", Func, 1}, + {"Source", Func, 1}, + }, + "go/importer": { + {"Default", Func, 5}, + {"For", Func, 5}, + {"ForCompiler", Func, 12}, + {"Lookup", Type, 5}, + }, + "go/parser": { + {"AllErrors", Const, 1}, + {"DeclarationErrors", Const, 0}, + {"ImportsOnly", Const, 0}, + {"Mode", Type, 0}, + {"PackageClauseOnly", Const, 0}, + {"ParseComments", Const, 0}, + {"ParseDir", Func, 0}, + {"ParseExpr", Func, 0}, + {"ParseExprFrom", Func, 5}, + {"ParseFile", Func, 0}, + {"SkipObjectResolution", Const, 17}, + {"SpuriousErrors", Const, 0}, + {"Trace", Const, 0}, + }, + "go/printer": { + {"(*Config).Fprint", Method, 0}, + {"CommentedNode", Type, 0}, + {"CommentedNode.Comments", Field, 0}, + {"CommentedNode.Node", Field, 0}, + {"Config", Type, 0}, + {"Config.Indent", Field, 1}, + {"Config.Mode", Field, 0}, + {"Config.Tabwidth", Field, 0}, + {"Fprint", Func, 0}, + {"Mode", Type, 0}, + {"RawFormat", Const, 0}, + {"SourcePos", Const, 0}, + {"TabIndent", Const, 0}, + {"UseSpaces", Const, 0}, + }, + "go/scanner": { + {"(*ErrorList).Add", Method, 0}, + {"(*ErrorList).RemoveMultiples", Method, 0}, + {"(*ErrorList).Reset", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(Error).Error", Method, 0}, + {"(ErrorList).Err", Method, 0}, + {"(ErrorList).Error", Method, 0}, + {"(ErrorList).Len", Method, 0}, + {"(ErrorList).Less", Method, 0}, + {"(ErrorList).Sort", Method, 0}, + {"(ErrorList).Swap", Method, 0}, + {"Error", Type, 0}, + {"Error.Msg", Field, 0}, + {"Error.Pos", Field, 0}, + {"ErrorHandler", Type, 0}, + {"ErrorList", Type, 0}, + {"Mode", Type, 0}, + {"PrintError", Func, 0}, + {"ScanComments", Const, 0}, + 
{"Scanner", Type, 0}, + {"Scanner.ErrorCount", Field, 0}, + }, + "go/token": { + {"(*File).AddLine", Method, 0}, + {"(*File).AddLineColumnInfo", Method, 11}, + {"(*File).AddLineInfo", Method, 0}, + {"(*File).Base", Method, 0}, + {"(*File).Line", Method, 0}, + {"(*File).LineCount", Method, 0}, + {"(*File).LineStart", Method, 12}, + {"(*File).Lines", Method, 21}, + {"(*File).MergeLine", Method, 2}, + {"(*File).Name", Method, 0}, + {"(*File).Offset", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*File).Position", Method, 0}, + {"(*File).PositionFor", Method, 4}, + {"(*File).SetLines", Method, 0}, + {"(*File).SetLinesForContent", Method, 0}, + {"(*File).Size", Method, 0}, + {"(*FileSet).AddFile", Method, 0}, + {"(*FileSet).Base", Method, 0}, + {"(*FileSet).File", Method, 0}, + {"(*FileSet).Iterate", Method, 0}, + {"(*FileSet).Position", Method, 0}, + {"(*FileSet).PositionFor", Method, 4}, + {"(*FileSet).Read", Method, 0}, + {"(*FileSet).RemoveFile", Method, 20}, + {"(*FileSet).Write", Method, 0}, + {"(*Position).IsValid", Method, 0}, + {"(Pos).IsValid", Method, 0}, + {"(Position).String", Method, 0}, + {"(Token).IsKeyword", Method, 0}, + {"(Token).IsLiteral", Method, 0}, + {"(Token).IsOperator", Method, 0}, + {"(Token).Precedence", Method, 0}, + {"(Token).String", Method, 0}, + {"ADD", Const, 0}, + {"ADD_ASSIGN", Const, 0}, + {"AND", Const, 0}, + {"AND_ASSIGN", Const, 0}, + {"AND_NOT", Const, 0}, + {"AND_NOT_ASSIGN", Const, 0}, + {"ARROW", Const, 0}, + {"ASSIGN", Const, 0}, + {"BREAK", Const, 0}, + {"CASE", Const, 0}, + {"CHAN", Const, 0}, + {"CHAR", Const, 0}, + {"COLON", Const, 0}, + {"COMMA", Const, 0}, + {"COMMENT", Const, 0}, + {"CONST", Const, 0}, + {"CONTINUE", Const, 0}, + {"DEC", Const, 0}, + {"DEFAULT", Const, 0}, + {"DEFER", Const, 0}, + {"DEFINE", Const, 0}, + {"ELLIPSIS", Const, 0}, + {"ELSE", Const, 0}, + {"EOF", Const, 0}, + {"EQL", Const, 0}, + {"FALLTHROUGH", Const, 0}, + {"FLOAT", Const, 0}, + {"FOR", Const, 0}, + {"FUNC", Const, 0}, + {"File", Type, 0}, + {"FileSet", Type, 0}, + {"GEQ", Const, 0}, + {"GO", Const, 0}, + {"GOTO", Const, 0}, + {"GTR", Const, 0}, + {"HighestPrec", Const, 0}, + {"IDENT", Const, 0}, + {"IF", Const, 0}, + {"ILLEGAL", Const, 0}, + {"IMAG", Const, 0}, + {"IMPORT", Const, 0}, + {"INC", Const, 0}, + {"INT", Const, 0}, + {"INTERFACE", Const, 0}, + {"IsExported", Func, 13}, + {"IsIdentifier", Func, 13}, + {"IsKeyword", Func, 13}, + {"LAND", Const, 0}, + {"LBRACE", Const, 0}, + {"LBRACK", Const, 0}, + {"LEQ", Const, 0}, + {"LOR", Const, 0}, + {"LPAREN", Const, 0}, + {"LSS", Const, 0}, + {"Lookup", Func, 0}, + {"LowestPrec", Const, 0}, + {"MAP", Const, 0}, + {"MUL", Const, 0}, + {"MUL_ASSIGN", Const, 0}, + {"NEQ", Const, 0}, + {"NOT", Const, 0}, + {"NewFileSet", Func, 0}, + {"NoPos", Const, 0}, + {"OR", Const, 0}, + {"OR_ASSIGN", Const, 0}, + {"PACKAGE", Const, 0}, + {"PERIOD", Const, 0}, + {"Pos", Type, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"QUO", Const, 0}, + {"QUO_ASSIGN", Const, 0}, + {"RANGE", Const, 0}, + {"RBRACE", Const, 0}, + {"RBRACK", Const, 0}, + {"REM", Const, 0}, + {"REM_ASSIGN", Const, 0}, + {"RETURN", Const, 0}, + {"RPAREN", Const, 0}, + {"SELECT", Const, 0}, + {"SEMICOLON", Const, 0}, + {"SHL", Const, 0}, + {"SHL_ASSIGN", Const, 0}, + {"SHR", Const, 0}, + {"SHR_ASSIGN", Const, 0}, + {"STRING", Const, 0}, + {"STRUCT", Const, 0}, + {"SUB", Const, 0}, + {"SUB_ASSIGN", Const, 0}, + {"SWITCH", Const, 0}, + {"TILDE", 
Const, 18}, + {"TYPE", Const, 0}, + {"Token", Type, 0}, + {"UnaryPrec", Const, 0}, + {"VAR", Const, 0}, + {"XOR", Const, 0}, + {"XOR_ASSIGN", Const, 0}, + }, + "go/types": { + {"(*Alias).Obj", Method, 22}, + {"(*Alias).String", Method, 22}, + {"(*Alias).Underlying", Method, 22}, + {"(*ArgumentError).Error", Method, 18}, + {"(*ArgumentError).Unwrap", Method, 18}, + {"(*Array).Elem", Method, 5}, + {"(*Array).Len", Method, 5}, + {"(*Array).String", Method, 5}, + {"(*Array).Underlying", Method, 5}, + {"(*Basic).Info", Method, 5}, + {"(*Basic).Kind", Method, 5}, + {"(*Basic).Name", Method, 5}, + {"(*Basic).String", Method, 5}, + {"(*Basic).Underlying", Method, 5}, + {"(*Builtin).Exported", Method, 5}, + {"(*Builtin).Id", Method, 5}, + {"(*Builtin).Name", Method, 5}, + {"(*Builtin).Parent", Method, 5}, + {"(*Builtin).Pkg", Method, 5}, + {"(*Builtin).Pos", Method, 5}, + {"(*Builtin).String", Method, 5}, + {"(*Builtin).Type", Method, 5}, + {"(*Chan).Dir", Method, 5}, + {"(*Chan).Elem", Method, 5}, + {"(*Chan).String", Method, 5}, + {"(*Chan).Underlying", Method, 5}, + {"(*Checker).Files", Method, 5}, + {"(*Config).Check", Method, 5}, + {"(*Const).Exported", Method, 5}, + {"(*Const).Id", Method, 5}, + {"(*Const).Name", Method, 5}, + {"(*Const).Parent", Method, 5}, + {"(*Const).Pkg", Method, 5}, + {"(*Const).Pos", Method, 5}, + {"(*Const).String", Method, 5}, + {"(*Const).Type", Method, 5}, + {"(*Const).Val", Method, 5}, + {"(*Func).Exported", Method, 5}, + {"(*Func).FullName", Method, 5}, + {"(*Func).Id", Method, 5}, + {"(*Func).Name", Method, 5}, + {"(*Func).Origin", Method, 19}, + {"(*Func).Parent", Method, 5}, + {"(*Func).Pkg", Method, 5}, + {"(*Func).Pos", Method, 5}, + {"(*Func).Scope", Method, 5}, + {"(*Func).String", Method, 5}, + {"(*Func).Type", Method, 5}, + {"(*Info).ObjectOf", Method, 5}, + {"(*Info).PkgNameOf", Method, 22}, + {"(*Info).TypeOf", Method, 5}, + {"(*Initializer).String", Method, 5}, + {"(*Interface).Complete", Method, 5}, + {"(*Interface).Embedded", Method, 5}, + {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).Empty", Method, 5}, + {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).IsComparable", Method, 18}, + {"(*Interface).IsImplicit", Method, 18}, + {"(*Interface).IsMethodSet", Method, 18}, + {"(*Interface).MarkImplicit", Method, 18}, + {"(*Interface).Method", Method, 5}, + {"(*Interface).NumEmbeddeds", Method, 5}, + {"(*Interface).NumExplicitMethods", Method, 5}, + {"(*Interface).NumMethods", Method, 5}, + {"(*Interface).String", Method, 5}, + {"(*Interface).Underlying", Method, 5}, + {"(*Label).Exported", Method, 5}, + {"(*Label).Id", Method, 5}, + {"(*Label).Name", Method, 5}, + {"(*Label).Parent", Method, 5}, + {"(*Label).Pkg", Method, 5}, + {"(*Label).Pos", Method, 5}, + {"(*Label).String", Method, 5}, + {"(*Label).Type", Method, 5}, + {"(*Map).Elem", Method, 5}, + {"(*Map).Key", Method, 5}, + {"(*Map).String", Method, 5}, + {"(*Map).Underlying", Method, 5}, + {"(*MethodSet).At", Method, 5}, + {"(*MethodSet).Len", Method, 5}, + {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).String", Method, 5}, + {"(*Named).AddMethod", Method, 5}, + {"(*Named).Method", Method, 5}, + {"(*Named).NumMethods", Method, 5}, + {"(*Named).Obj", Method, 5}, + {"(*Named).Origin", Method, 18}, + {"(*Named).SetTypeParams", Method, 18}, + {"(*Named).SetUnderlying", Method, 5}, + {"(*Named).String", Method, 5}, + {"(*Named).TypeArgs", Method, 18}, + {"(*Named).TypeParams", Method, 18}, + {"(*Named).Underlying", Method, 5}, + {"(*Nil).Exported", Method, 5}, + 
{"(*Nil).Id", Method, 5}, + {"(*Nil).Name", Method, 5}, + {"(*Nil).Parent", Method, 5}, + {"(*Nil).Pkg", Method, 5}, + {"(*Nil).Pos", Method, 5}, + {"(*Nil).String", Method, 5}, + {"(*Nil).Type", Method, 5}, + {"(*Package).Complete", Method, 5}, + {"(*Package).GoVersion", Method, 21}, + {"(*Package).Imports", Method, 5}, + {"(*Package).MarkComplete", Method, 5}, + {"(*Package).Name", Method, 5}, + {"(*Package).Path", Method, 5}, + {"(*Package).Scope", Method, 5}, + {"(*Package).SetImports", Method, 5}, + {"(*Package).SetName", Method, 6}, + {"(*Package).String", Method, 5}, + {"(*PkgName).Exported", Method, 5}, + {"(*PkgName).Id", Method, 5}, + {"(*PkgName).Imported", Method, 5}, + {"(*PkgName).Name", Method, 5}, + {"(*PkgName).Parent", Method, 5}, + {"(*PkgName).Pkg", Method, 5}, + {"(*PkgName).Pos", Method, 5}, + {"(*PkgName).String", Method, 5}, + {"(*PkgName).Type", Method, 5}, + {"(*Pointer).Elem", Method, 5}, + {"(*Pointer).String", Method, 5}, + {"(*Pointer).Underlying", Method, 5}, + {"(*Scope).Child", Method, 5}, + {"(*Scope).Contains", Method, 5}, + {"(*Scope).End", Method, 5}, + {"(*Scope).Innermost", Method, 5}, + {"(*Scope).Insert", Method, 5}, + {"(*Scope).Len", Method, 5}, + {"(*Scope).Lookup", Method, 5}, + {"(*Scope).LookupParent", Method, 5}, + {"(*Scope).Names", Method, 5}, + {"(*Scope).NumChildren", Method, 5}, + {"(*Scope).Parent", Method, 5}, + {"(*Scope).Pos", Method, 5}, + {"(*Scope).String", Method, 5}, + {"(*Scope).WriteTo", Method, 5}, + {"(*Selection).Index", Method, 5}, + {"(*Selection).Indirect", Method, 5}, + {"(*Selection).Kind", Method, 5}, + {"(*Selection).Obj", Method, 5}, + {"(*Selection).Recv", Method, 5}, + {"(*Selection).String", Method, 5}, + {"(*Selection).Type", Method, 5}, + {"(*Signature).Params", Method, 5}, + {"(*Signature).Recv", Method, 5}, + {"(*Signature).RecvTypeParams", Method, 18}, + {"(*Signature).Results", Method, 5}, + {"(*Signature).String", Method, 5}, + {"(*Signature).TypeParams", Method, 18}, + {"(*Signature).Underlying", Method, 5}, + {"(*Signature).Variadic", Method, 5}, + {"(*Slice).Elem", Method, 5}, + {"(*Slice).String", Method, 5}, + {"(*Slice).Underlying", Method, 5}, + {"(*StdSizes).Alignof", Method, 5}, + {"(*StdSizes).Offsetsof", Method, 5}, + {"(*StdSizes).Sizeof", Method, 5}, + {"(*Struct).Field", Method, 5}, + {"(*Struct).NumFields", Method, 5}, + {"(*Struct).String", Method, 5}, + {"(*Struct).Tag", Method, 5}, + {"(*Struct).Underlying", Method, 5}, + {"(*Term).String", Method, 18}, + {"(*Term).Tilde", Method, 18}, + {"(*Term).Type", Method, 18}, + {"(*Tuple).At", Method, 5}, + {"(*Tuple).Len", Method, 5}, + {"(*Tuple).String", Method, 5}, + {"(*Tuple).Underlying", Method, 5}, + {"(*TypeList).At", Method, 18}, + {"(*TypeList).Len", Method, 18}, + {"(*TypeName).Exported", Method, 5}, + {"(*TypeName).Id", Method, 5}, + {"(*TypeName).IsAlias", Method, 9}, + {"(*TypeName).Name", Method, 5}, + {"(*TypeName).Parent", Method, 5}, + {"(*TypeName).Pkg", Method, 5}, + {"(*TypeName).Pos", Method, 5}, + {"(*TypeName).String", Method, 5}, + {"(*TypeName).Type", Method, 5}, + {"(*TypeParam).Constraint", Method, 18}, + {"(*TypeParam).Index", Method, 18}, + {"(*TypeParam).Obj", Method, 18}, + {"(*TypeParam).SetConstraint", Method, 18}, + {"(*TypeParam).String", Method, 18}, + {"(*TypeParam).Underlying", Method, 18}, + {"(*TypeParamList).At", Method, 18}, + {"(*TypeParamList).Len", Method, 18}, + {"(*Union).Len", Method, 18}, + {"(*Union).String", Method, 18}, + {"(*Union).Term", Method, 18}, + {"(*Union).Underlying", Method, 18}, + 
{"(*Var).Anonymous", Method, 5}, + {"(*Var).Embedded", Method, 11}, + {"(*Var).Exported", Method, 5}, + {"(*Var).Id", Method, 5}, + {"(*Var).IsField", Method, 5}, + {"(*Var).Name", Method, 5}, + {"(*Var).Origin", Method, 19}, + {"(*Var).Parent", Method, 5}, + {"(*Var).Pkg", Method, 5}, + {"(*Var).Pos", Method, 5}, + {"(*Var).String", Method, 5}, + {"(*Var).Type", Method, 5}, + {"(Checker).ObjectOf", Method, 5}, + {"(Checker).PkgNameOf", Method, 22}, + {"(Checker).TypeOf", Method, 5}, + {"(Error).Error", Method, 5}, + {"(TypeAndValue).Addressable", Method, 5}, + {"(TypeAndValue).Assignable", Method, 5}, + {"(TypeAndValue).HasOk", Method, 5}, + {"(TypeAndValue).IsBuiltin", Method, 5}, + {"(TypeAndValue).IsNil", Method, 5}, + {"(TypeAndValue).IsType", Method, 5}, + {"(TypeAndValue).IsValue", Method, 5}, + {"(TypeAndValue).IsVoid", Method, 5}, + {"Alias", Type, 22}, + {"ArgumentError", Type, 18}, + {"ArgumentError.Err", Field, 18}, + {"ArgumentError.Index", Field, 18}, + {"Array", Type, 5}, + {"AssertableTo", Func, 5}, + {"AssignableTo", Func, 5}, + {"Basic", Type, 5}, + {"BasicInfo", Type, 5}, + {"BasicKind", Type, 5}, + {"Bool", Const, 5}, + {"Builtin", Type, 5}, + {"Byte", Const, 5}, + {"Chan", Type, 5}, + {"ChanDir", Type, 5}, + {"CheckExpr", Func, 13}, + {"Checker", Type, 5}, + {"Checker.Info", Field, 5}, + {"Comparable", Func, 5}, + {"Complex128", Const, 5}, + {"Complex64", Const, 5}, + {"Config", Type, 5}, + {"Config.Context", Field, 18}, + {"Config.DisableUnusedImportCheck", Field, 5}, + {"Config.Error", Field, 5}, + {"Config.FakeImportC", Field, 5}, + {"Config.GoVersion", Field, 18}, + {"Config.IgnoreFuncBodies", Field, 5}, + {"Config.Importer", Field, 5}, + {"Config.Sizes", Field, 5}, + {"Const", Type, 5}, + {"Context", Type, 18}, + {"ConvertibleTo", Func, 5}, + {"DefPredeclaredTestFuncs", Func, 5}, + {"Default", Func, 8}, + {"Error", Type, 5}, + {"Error.Fset", Field, 5}, + {"Error.Msg", Field, 5}, + {"Error.Pos", Field, 5}, + {"Error.Soft", Field, 5}, + {"Eval", Func, 5}, + {"ExprString", Func, 5}, + {"FieldVal", Const, 5}, + {"Float32", Const, 5}, + {"Float64", Const, 5}, + {"Func", Type, 5}, + {"Id", Func, 5}, + {"Identical", Func, 5}, + {"IdenticalIgnoreTags", Func, 8}, + {"Implements", Func, 5}, + {"ImportMode", Type, 6}, + {"Importer", Type, 5}, + {"ImporterFrom", Type, 6}, + {"Info", Type, 5}, + {"Info.Defs", Field, 5}, + {"Info.FileVersions", Field, 22}, + {"Info.Implicits", Field, 5}, + {"Info.InitOrder", Field, 5}, + {"Info.Instances", Field, 18}, + {"Info.Scopes", Field, 5}, + {"Info.Selections", Field, 5}, + {"Info.Types", Field, 5}, + {"Info.Uses", Field, 5}, + {"Initializer", Type, 5}, + {"Initializer.Lhs", Field, 5}, + {"Initializer.Rhs", Field, 5}, + {"Instance", Type, 18}, + {"Instance.Type", Field, 18}, + {"Instance.TypeArgs", Field, 18}, + {"Instantiate", Func, 18}, + {"Int", Const, 5}, + {"Int16", Const, 5}, + {"Int32", Const, 5}, + {"Int64", Const, 5}, + {"Int8", Const, 5}, + {"Interface", Type, 5}, + {"Invalid", Const, 5}, + {"IsBoolean", Const, 5}, + {"IsComplex", Const, 5}, + {"IsConstType", Const, 5}, + {"IsFloat", Const, 5}, + {"IsInteger", Const, 5}, + {"IsInterface", Func, 5}, + {"IsNumeric", Const, 5}, + {"IsOrdered", Const, 5}, + {"IsString", Const, 5}, + {"IsUnsigned", Const, 5}, + {"IsUntyped", Const, 5}, + {"Label", Type, 5}, + {"LookupFieldOrMethod", Func, 5}, + {"Map", Type, 5}, + {"MethodExpr", Const, 5}, + {"MethodSet", Type, 5}, + {"MethodVal", Const, 5}, + {"MissingMethod", Func, 5}, + {"Named", Type, 5}, + {"NewAlias", Func, 22}, + {"NewArray", 
Func, 5}, + {"NewChan", Func, 5}, + {"NewChecker", Func, 5}, + {"NewConst", Func, 5}, + {"NewContext", Func, 18}, + {"NewField", Func, 5}, + {"NewFunc", Func, 5}, + {"NewInterface", Func, 5}, + {"NewInterfaceType", Func, 11}, + {"NewLabel", Func, 5}, + {"NewMap", Func, 5}, + {"NewMethodSet", Func, 5}, + {"NewNamed", Func, 5}, + {"NewPackage", Func, 5}, + {"NewParam", Func, 5}, + {"NewPkgName", Func, 5}, + {"NewPointer", Func, 5}, + {"NewScope", Func, 5}, + {"NewSignature", Func, 5}, + {"NewSignatureType", Func, 18}, + {"NewSlice", Func, 5}, + {"NewStruct", Func, 5}, + {"NewTerm", Func, 18}, + {"NewTuple", Func, 5}, + {"NewTypeName", Func, 5}, + {"NewTypeParam", Func, 18}, + {"NewUnion", Func, 18}, + {"NewVar", Func, 5}, + {"Nil", Type, 5}, + {"Object", Type, 5}, + {"ObjectString", Func, 5}, + {"Package", Type, 5}, + {"PkgName", Type, 5}, + {"Pointer", Type, 5}, + {"Qualifier", Type, 5}, + {"RecvOnly", Const, 5}, + {"RelativeTo", Func, 5}, + {"Rune", Const, 5}, + {"Satisfies", Func, 20}, + {"Scope", Type, 5}, + {"Selection", Type, 5}, + {"SelectionKind", Type, 5}, + {"SelectionString", Func, 5}, + {"SendOnly", Const, 5}, + {"SendRecv", Const, 5}, + {"Signature", Type, 5}, + {"Sizes", Type, 5}, + {"SizesFor", Func, 9}, + {"Slice", Type, 5}, + {"StdSizes", Type, 5}, + {"StdSizes.MaxAlign", Field, 5}, + {"StdSizes.WordSize", Field, 5}, + {"String", Const, 5}, + {"Struct", Type, 5}, + {"Term", Type, 18}, + {"Tuple", Type, 5}, + {"Typ", Var, 5}, + {"Type", Type, 5}, + {"TypeAndValue", Type, 5}, + {"TypeAndValue.Type", Field, 5}, + {"TypeAndValue.Value", Field, 5}, + {"TypeList", Type, 18}, + {"TypeName", Type, 5}, + {"TypeParam", Type, 18}, + {"TypeParamList", Type, 18}, + {"TypeString", Func, 5}, + {"Uint", Const, 5}, + {"Uint16", Const, 5}, + {"Uint32", Const, 5}, + {"Uint64", Const, 5}, + {"Uint8", Const, 5}, + {"Uintptr", Const, 5}, + {"Unalias", Func, 22}, + {"Union", Type, 18}, + {"Universe", Var, 5}, + {"Unsafe", Var, 5}, + {"UnsafePointer", Const, 5}, + {"UntypedBool", Const, 5}, + {"UntypedComplex", Const, 5}, + {"UntypedFloat", Const, 5}, + {"UntypedInt", Const, 5}, + {"UntypedNil", Const, 5}, + {"UntypedRune", Const, 5}, + {"UntypedString", Const, 5}, + {"Var", Type, 5}, + {"WriteExpr", Func, 5}, + {"WriteSignature", Func, 5}, + {"WriteType", Func, 5}, + }, + "go/version": { + {"Compare", Func, 22}, + {"IsValid", Func, 22}, + {"Lang", Func, 22}, + }, + "hash": { + {"Hash", Type, 0}, + {"Hash32", Type, 0}, + {"Hash64", Type, 0}, + }, + "hash/adler32": { + {"Checksum", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + }, + "hash/crc32": { + {"Castagnoli", Const, 0}, + {"Checksum", Func, 0}, + {"ChecksumIEEE", Func, 0}, + {"IEEE", Const, 0}, + {"IEEETable", Var, 0}, + {"Koopman", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"NewIEEE", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/crc64": { + {"Checksum", Func, 0}, + {"ECMA", Const, 0}, + {"ISO", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/fnv": { + {"New128", Func, 9}, + {"New128a", Func, 9}, + {"New32", Func, 0}, + {"New32a", Func, 0}, + {"New64", Func, 0}, + {"New64a", Func, 0}, + }, + "hash/maphash": { + {"(*Hash).BlockSize", Method, 14}, + {"(*Hash).Reset", Method, 14}, + {"(*Hash).Seed", Method, 14}, + {"(*Hash).SetSeed", Method, 14}, + {"(*Hash).Size", Method, 14}, + {"(*Hash).Sum", Method, 14}, + {"(*Hash).Sum64", Method, 14}, + {"(*Hash).Write", Method, 14}, + 
{"(*Hash).WriteByte", Method, 14}, + {"(*Hash).WriteString", Method, 14}, + {"Bytes", Func, 19}, + {"Hash", Type, 14}, + {"MakeSeed", Func, 14}, + {"Seed", Type, 14}, + {"String", Func, 19}, + }, + "html": { + {"EscapeString", Func, 0}, + {"UnescapeString", Func, 0}, + }, + "html/template": { + {"(*Error).Error", Method, 0}, + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 6}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"CSS", Type, 0}, + {"ErrAmbigContext", Const, 0}, + {"ErrBadHTML", Const, 0}, + {"ErrBranchEnd", Const, 0}, + {"ErrEndContext", Const, 0}, + {"ErrJSTemplate", Const, 21}, + {"ErrNoSuchTemplate", Const, 0}, + {"ErrOutputContext", Const, 0}, + {"ErrPartialCharset", Const, 0}, + {"ErrPartialEscape", Const, 0}, + {"ErrPredefinedEscaper", Const, 9}, + {"ErrRangeLoopReentry", Const, 0}, + {"ErrSlashAmbig", Const, 0}, + {"Error", Type, 0}, + {"Error.Description", Field, 0}, + {"Error.ErrorCode", Field, 0}, + {"Error.Line", Field, 0}, + {"Error.Name", Field, 0}, + {"Error.Node", Field, 4}, + {"ErrorCode", Type, 0}, + {"FuncMap", Type, 0}, + {"HTML", Type, 0}, + {"HTMLAttr", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JS", Type, 0}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"JSStr", Type, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"OK", Const, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Srcset", Type, 10}, + {"Template", Type, 0}, + {"Template.Tree", Field, 2}, + {"URL", Type, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "image": { + {"(*Alpha).AlphaAt", Method, 4}, + {"(*Alpha).At", Method, 0}, + {"(*Alpha).Bounds", Method, 0}, + {"(*Alpha).ColorModel", Method, 0}, + {"(*Alpha).Opaque", Method, 0}, + {"(*Alpha).PixOffset", Method, 0}, + {"(*Alpha).RGBA64At", Method, 17}, + {"(*Alpha).Set", Method, 0}, + {"(*Alpha).SetAlpha", Method, 0}, + {"(*Alpha).SetRGBA64", Method, 17}, + {"(*Alpha).SubImage", Method, 0}, + {"(*Alpha16).Alpha16At", Method, 4}, + {"(*Alpha16).At", Method, 0}, + {"(*Alpha16).Bounds", Method, 0}, + {"(*Alpha16).ColorModel", Method, 0}, + {"(*Alpha16).Opaque", Method, 0}, + {"(*Alpha16).PixOffset", Method, 0}, + {"(*Alpha16).RGBA64At", Method, 17}, + {"(*Alpha16).Set", Method, 0}, + {"(*Alpha16).SetAlpha16", Method, 0}, + {"(*Alpha16).SetRGBA64", Method, 17}, + {"(*Alpha16).SubImage", Method, 0}, + {"(*CMYK).At", Method, 5}, + {"(*CMYK).Bounds", Method, 5}, + {"(*CMYK).CMYKAt", Method, 5}, + {"(*CMYK).ColorModel", Method, 5}, + {"(*CMYK).Opaque", Method, 5}, + {"(*CMYK).PixOffset", Method, 5}, + {"(*CMYK).RGBA64At", Method, 17}, + {"(*CMYK).Set", Method, 5}, + {"(*CMYK).SetCMYK", Method, 5}, + {"(*CMYK).SetRGBA64", Method, 17}, + {"(*CMYK).SubImage", Method, 5}, + {"(*Gray).At", Method, 0}, + {"(*Gray).Bounds", Method, 0}, + {"(*Gray).ColorModel", Method, 0}, + {"(*Gray).GrayAt", Method, 4}, + {"(*Gray).Opaque", Method, 0}, + {"(*Gray).PixOffset", Method, 0}, + {"(*Gray).RGBA64At", 
Method, 17}, + {"(*Gray).Set", Method, 0}, + {"(*Gray).SetGray", Method, 0}, + {"(*Gray).SetRGBA64", Method, 17}, + {"(*Gray).SubImage", Method, 0}, + {"(*Gray16).At", Method, 0}, + {"(*Gray16).Bounds", Method, 0}, + {"(*Gray16).ColorModel", Method, 0}, + {"(*Gray16).Gray16At", Method, 4}, + {"(*Gray16).Opaque", Method, 0}, + {"(*Gray16).PixOffset", Method, 0}, + {"(*Gray16).RGBA64At", Method, 17}, + {"(*Gray16).Set", Method, 0}, + {"(*Gray16).SetGray16", Method, 0}, + {"(*Gray16).SetRGBA64", Method, 17}, + {"(*Gray16).SubImage", Method, 0}, + {"(*NRGBA).At", Method, 0}, + {"(*NRGBA).Bounds", Method, 0}, + {"(*NRGBA).ColorModel", Method, 0}, + {"(*NRGBA).NRGBAAt", Method, 4}, + {"(*NRGBA).Opaque", Method, 0}, + {"(*NRGBA).PixOffset", Method, 0}, + {"(*NRGBA).RGBA64At", Method, 17}, + {"(*NRGBA).Set", Method, 0}, + {"(*NRGBA).SetNRGBA", Method, 0}, + {"(*NRGBA).SetRGBA64", Method, 17}, + {"(*NRGBA).SubImage", Method, 0}, + {"(*NRGBA64).At", Method, 0}, + {"(*NRGBA64).Bounds", Method, 0}, + {"(*NRGBA64).ColorModel", Method, 0}, + {"(*NRGBA64).NRGBA64At", Method, 4}, + {"(*NRGBA64).Opaque", Method, 0}, + {"(*NRGBA64).PixOffset", Method, 0}, + {"(*NRGBA64).RGBA64At", Method, 17}, + {"(*NRGBA64).Set", Method, 0}, + {"(*NRGBA64).SetNRGBA64", Method, 0}, + {"(*NRGBA64).SetRGBA64", Method, 17}, + {"(*NRGBA64).SubImage", Method, 0}, + {"(*NYCbCrA).AOffset", Method, 6}, + {"(*NYCbCrA).At", Method, 6}, + {"(*NYCbCrA).Bounds", Method, 6}, + {"(*NYCbCrA).COffset", Method, 6}, + {"(*NYCbCrA).ColorModel", Method, 6}, + {"(*NYCbCrA).NYCbCrAAt", Method, 6}, + {"(*NYCbCrA).Opaque", Method, 6}, + {"(*NYCbCrA).RGBA64At", Method, 17}, + {"(*NYCbCrA).SubImage", Method, 6}, + {"(*NYCbCrA).YCbCrAt", Method, 6}, + {"(*NYCbCrA).YOffset", Method, 6}, + {"(*Paletted).At", Method, 0}, + {"(*Paletted).Bounds", Method, 0}, + {"(*Paletted).ColorIndexAt", Method, 0}, + {"(*Paletted).ColorModel", Method, 0}, + {"(*Paletted).Opaque", Method, 0}, + {"(*Paletted).PixOffset", Method, 0}, + {"(*Paletted).RGBA64At", Method, 17}, + {"(*Paletted).Set", Method, 0}, + {"(*Paletted).SetColorIndex", Method, 0}, + {"(*Paletted).SetRGBA64", Method, 17}, + {"(*Paletted).SubImage", Method, 0}, + {"(*RGBA).At", Method, 0}, + {"(*RGBA).Bounds", Method, 0}, + {"(*RGBA).ColorModel", Method, 0}, + {"(*RGBA).Opaque", Method, 0}, + {"(*RGBA).PixOffset", Method, 0}, + {"(*RGBA).RGBA64At", Method, 17}, + {"(*RGBA).RGBAAt", Method, 4}, + {"(*RGBA).Set", Method, 0}, + {"(*RGBA).SetRGBA", Method, 0}, + {"(*RGBA).SetRGBA64", Method, 17}, + {"(*RGBA).SubImage", Method, 0}, + {"(*RGBA64).At", Method, 0}, + {"(*RGBA64).Bounds", Method, 0}, + {"(*RGBA64).ColorModel", Method, 0}, + {"(*RGBA64).Opaque", Method, 0}, + {"(*RGBA64).PixOffset", Method, 0}, + {"(*RGBA64).RGBA64At", Method, 4}, + {"(*RGBA64).Set", Method, 0}, + {"(*RGBA64).SetRGBA64", Method, 0}, + {"(*RGBA64).SubImage", Method, 0}, + {"(*Uniform).At", Method, 0}, + {"(*Uniform).Bounds", Method, 0}, + {"(*Uniform).ColorModel", Method, 0}, + {"(*Uniform).Convert", Method, 0}, + {"(*Uniform).Opaque", Method, 0}, + {"(*Uniform).RGBA", Method, 0}, + {"(*Uniform).RGBA64At", Method, 17}, + {"(*YCbCr).At", Method, 0}, + {"(*YCbCr).Bounds", Method, 0}, + {"(*YCbCr).COffset", Method, 0}, + {"(*YCbCr).ColorModel", Method, 0}, + {"(*YCbCr).Opaque", Method, 0}, + {"(*YCbCr).RGBA64At", Method, 17}, + {"(*YCbCr).SubImage", Method, 0}, + {"(*YCbCr).YCbCrAt", Method, 4}, + {"(*YCbCr).YOffset", Method, 0}, + {"(Point).Add", Method, 0}, + {"(Point).Div", Method, 0}, + {"(Point).Eq", Method, 0}, + {"(Point).In", 
Method, 0}, + {"(Point).Mod", Method, 0}, + {"(Point).Mul", Method, 0}, + {"(Point).String", Method, 0}, + {"(Point).Sub", Method, 0}, + {"(Rectangle).Add", Method, 0}, + {"(Rectangle).At", Method, 5}, + {"(Rectangle).Bounds", Method, 5}, + {"(Rectangle).Canon", Method, 0}, + {"(Rectangle).ColorModel", Method, 5}, + {"(Rectangle).Dx", Method, 0}, + {"(Rectangle).Dy", Method, 0}, + {"(Rectangle).Empty", Method, 0}, + {"(Rectangle).Eq", Method, 0}, + {"(Rectangle).In", Method, 0}, + {"(Rectangle).Inset", Method, 0}, + {"(Rectangle).Intersect", Method, 0}, + {"(Rectangle).Overlaps", Method, 0}, + {"(Rectangle).RGBA64At", Method, 17}, + {"(Rectangle).Size", Method, 0}, + {"(Rectangle).String", Method, 0}, + {"(Rectangle).Sub", Method, 0}, + {"(Rectangle).Union", Method, 0}, + {"(YCbCrSubsampleRatio).String", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.Pix", Field, 0}, + {"Alpha.Rect", Field, 0}, + {"Alpha.Stride", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.Pix", Field, 0}, + {"Alpha16.Rect", Field, 0}, + {"Alpha16.Stride", Field, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.Pix", Field, 5}, + {"CMYK.Rect", Field, 5}, + {"CMYK.Stride", Field, 5}, + {"Config", Type, 0}, + {"Config.ColorModel", Field, 0}, + {"Config.Height", Field, 0}, + {"Config.Width", Field, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"ErrFormat", Var, 0}, + {"Gray", Type, 0}, + {"Gray.Pix", Field, 0}, + {"Gray.Rect", Field, 0}, + {"Gray.Stride", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Pix", Field, 0}, + {"Gray16.Rect", Field, 0}, + {"Gray16.Stride", Field, 0}, + {"Image", Type, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.Pix", Field, 0}, + {"NRGBA.Rect", Field, 0}, + {"NRGBA.Stride", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.Pix", Field, 0}, + {"NRGBA64.Rect", Field, 0}, + {"NRGBA64.Stride", Field, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.AStride", Field, 6}, + {"NYCbCrA.YCbCr", Field, 6}, + {"NewAlpha", Func, 0}, + {"NewAlpha16", Func, 0}, + {"NewCMYK", Func, 5}, + {"NewGray", Func, 0}, + {"NewGray16", Func, 0}, + {"NewNRGBA", Func, 0}, + {"NewNRGBA64", Func, 0}, + {"NewNYCbCrA", Func, 6}, + {"NewPaletted", Func, 0}, + {"NewRGBA", Func, 0}, + {"NewRGBA64", Func, 0}, + {"NewUniform", Func, 0}, + {"NewYCbCr", Func, 0}, + {"Opaque", Var, 0}, + {"Paletted", Type, 0}, + {"Paletted.Palette", Field, 0}, + {"Paletted.Pix", Field, 0}, + {"Paletted.Rect", Field, 0}, + {"Paletted.Stride", Field, 0}, + {"PalettedImage", Type, 0}, + {"Point", Type, 0}, + {"Point.X", Field, 0}, + {"Point.Y", Field, 0}, + {"Pt", Func, 0}, + {"RGBA", Type, 0}, + {"RGBA.Pix", Field, 0}, + {"RGBA.Rect", Field, 0}, + {"RGBA.Stride", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.Pix", Field, 0}, + {"RGBA64.Rect", Field, 0}, + {"RGBA64.Stride", Field, 0}, + {"RGBA64Image", Type, 17}, + {"Rect", Func, 0}, + {"Rectangle", Type, 0}, + {"Rectangle.Max", Field, 0}, + {"Rectangle.Min", Field, 0}, + {"RegisterFormat", Func, 0}, + {"Transparent", Var, 0}, + {"Uniform", Type, 0}, + {"Uniform.C", Field, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.CStride", Field, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Rect", Field, 0}, + {"YCbCr.SubsampleRatio", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCr.YStride", Field, 0}, + {"YCbCrSubsampleRatio", Type, 0}, + {"YCbCrSubsampleRatio410", Const, 5}, + {"YCbCrSubsampleRatio411", Const, 5}, + {"YCbCrSubsampleRatio420", Const, 0}, + {"YCbCrSubsampleRatio422", Const, 0}, + {"YCbCrSubsampleRatio440", Const, 1}, + {"YCbCrSubsampleRatio444", Const, 
0}, + {"ZP", Var, 0}, + {"ZR", Var, 0}, + }, + "image/color": { + {"(Alpha).RGBA", Method, 0}, + {"(Alpha16).RGBA", Method, 0}, + {"(CMYK).RGBA", Method, 5}, + {"(Gray).RGBA", Method, 0}, + {"(Gray16).RGBA", Method, 0}, + {"(NRGBA).RGBA", Method, 0}, + {"(NRGBA64).RGBA", Method, 0}, + {"(NYCbCrA).RGBA", Method, 6}, + {"(Palette).Convert", Method, 0}, + {"(Palette).Index", Method, 0}, + {"(RGBA).RGBA", Method, 0}, + {"(RGBA64).RGBA", Method, 0}, + {"(YCbCr).RGBA", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.A", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.A", Field, 0}, + {"Alpha16Model", Var, 0}, + {"AlphaModel", Var, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.C", Field, 5}, + {"CMYK.K", Field, 5}, + {"CMYK.M", Field, 5}, + {"CMYK.Y", Field, 5}, + {"CMYKModel", Var, 5}, + {"CMYKToRGB", Func, 5}, + {"Color", Type, 0}, + {"Gray", Type, 0}, + {"Gray.Y", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Y", Field, 0}, + {"Gray16Model", Var, 0}, + {"GrayModel", Var, 0}, + {"Model", Type, 0}, + {"ModelFunc", Func, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.A", Field, 0}, + {"NRGBA.B", Field, 0}, + {"NRGBA.G", Field, 0}, + {"NRGBA.R", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.A", Field, 0}, + {"NRGBA64.B", Field, 0}, + {"NRGBA64.G", Field, 0}, + {"NRGBA64.R", Field, 0}, + {"NRGBA64Model", Var, 0}, + {"NRGBAModel", Var, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.YCbCr", Field, 6}, + {"NYCbCrAModel", Var, 6}, + {"Opaque", Var, 0}, + {"Palette", Type, 0}, + {"RGBA", Type, 0}, + {"RGBA.A", Field, 0}, + {"RGBA.B", Field, 0}, + {"RGBA.G", Field, 0}, + {"RGBA.R", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.A", Field, 0}, + {"RGBA64.B", Field, 0}, + {"RGBA64.G", Field, 0}, + {"RGBA64.R", Field, 0}, + {"RGBA64Model", Var, 0}, + {"RGBAModel", Var, 0}, + {"RGBToCMYK", Func, 5}, + {"RGBToYCbCr", Func, 0}, + {"Transparent", Var, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCrModel", Var, 0}, + {"YCbCrToRGB", Func, 0}, + }, + "image/color/palette": { + {"Plan9", Var, 2}, + {"WebSafe", Var, 2}, + }, + "image/draw": { + {"(Op).Draw", Method, 2}, + {"Draw", Func, 0}, + {"DrawMask", Func, 0}, + {"Drawer", Type, 2}, + {"FloydSteinberg", Var, 2}, + {"Image", Type, 0}, + {"Op", Type, 0}, + {"Over", Const, 0}, + {"Quantizer", Type, 2}, + {"RGBA64Image", Type, 17}, + {"Src", Const, 0}, + }, + "image/gif": { + {"Decode", Func, 0}, + {"DecodeAll", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DisposalBackground", Const, 5}, + {"DisposalNone", Const, 5}, + {"DisposalPrevious", Const, 5}, + {"Encode", Func, 2}, + {"EncodeAll", Func, 2}, + {"GIF", Type, 0}, + {"GIF.BackgroundIndex", Field, 5}, + {"GIF.Config", Field, 5}, + {"GIF.Delay", Field, 0}, + {"GIF.Disposal", Field, 5}, + {"GIF.Image", Field, 0}, + {"GIF.LoopCount", Field, 0}, + {"Options", Type, 2}, + {"Options.Drawer", Field, 2}, + {"Options.NumColors", Field, 2}, + {"Options.Quantizer", Field, 2}, + }, + "image/jpeg": { + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultQuality", Const, 0}, + {"Encode", Func, 0}, + {"FormatError", Type, 0}, + {"Options", Type, 0}, + {"Options.Quality", Field, 0}, + {"Reader", Type, 0}, + {"UnsupportedError", Type, 0}, + }, + "image/png": { + {"(*Encoder).Encode", Method, 4}, + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"BestCompression", Const, 4}, + {"BestSpeed", Const, 4}, + 
{"CompressionLevel", Type, 4}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultCompression", Const, 4}, + {"Encode", Func, 0}, + {"Encoder", Type, 4}, + {"Encoder.BufferPool", Field, 9}, + {"Encoder.CompressionLevel", Field, 4}, + {"EncoderBuffer", Type, 9}, + {"EncoderBufferPool", Type, 9}, + {"FormatError", Type, 0}, + {"NoCompression", Const, 4}, + {"UnsupportedError", Type, 0}, + }, + "index/suffixarray": { + {"(*Index).Bytes", Method, 0}, + {"(*Index).FindAllIndex", Method, 0}, + {"(*Index).Lookup", Method, 0}, + {"(*Index).Read", Method, 0}, + {"(*Index).Write", Method, 0}, + {"Index", Type, 0}, + {"New", Func, 0}, + }, + "io": { + {"(*LimitedReader).Read", Method, 0}, + {"(*OffsetWriter).Seek", Method, 20}, + {"(*OffsetWriter).Write", Method, 20}, + {"(*OffsetWriter).WriteAt", Method, 20}, + {"(*PipeReader).Close", Method, 0}, + {"(*PipeReader).CloseWithError", Method, 0}, + {"(*PipeReader).Read", Method, 0}, + {"(*PipeWriter).Close", Method, 0}, + {"(*PipeWriter).CloseWithError", Method, 0}, + {"(*PipeWriter).Write", Method, 0}, + {"(*SectionReader).Outer", Method, 22}, + {"(*SectionReader).Read", Method, 0}, + {"(*SectionReader).ReadAt", Method, 0}, + {"(*SectionReader).Seek", Method, 0}, + {"(*SectionReader).Size", Method, 0}, + {"ByteReader", Type, 0}, + {"ByteScanner", Type, 0}, + {"ByteWriter", Type, 1}, + {"Closer", Type, 0}, + {"Copy", Func, 0}, + {"CopyBuffer", Func, 5}, + {"CopyN", Func, 0}, + {"Discard", Var, 16}, + {"EOF", Var, 0}, + {"ErrClosedPipe", Var, 0}, + {"ErrNoProgress", Var, 1}, + {"ErrShortBuffer", Var, 0}, + {"ErrShortWrite", Var, 0}, + {"ErrUnexpectedEOF", Var, 0}, + {"LimitReader", Func, 0}, + {"LimitedReader", Type, 0}, + {"LimitedReader.N", Field, 0}, + {"LimitedReader.R", Field, 0}, + {"MultiReader", Func, 0}, + {"MultiWriter", Func, 0}, + {"NewOffsetWriter", Func, 20}, + {"NewSectionReader", Func, 0}, + {"NopCloser", Func, 16}, + {"OffsetWriter", Type, 20}, + {"Pipe", Func, 0}, + {"PipeReader", Type, 0}, + {"PipeWriter", Type, 0}, + {"ReadAll", Func, 16}, + {"ReadAtLeast", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadFull", Func, 0}, + {"ReadSeekCloser", Type, 16}, + {"ReadSeeker", Type, 0}, + {"ReadWriteCloser", Type, 0}, + {"ReadWriteSeeker", Type, 0}, + {"ReadWriter", Type, 0}, + {"Reader", Type, 0}, + {"ReaderAt", Type, 0}, + {"ReaderFrom", Type, 0}, + {"RuneReader", Type, 0}, + {"RuneScanner", Type, 0}, + {"SectionReader", Type, 0}, + {"SeekCurrent", Const, 7}, + {"SeekEnd", Const, 7}, + {"SeekStart", Const, 7}, + {"Seeker", Type, 0}, + {"StringWriter", Type, 12}, + {"TeeReader", Func, 0}, + {"WriteCloser", Type, 0}, + {"WriteSeeker", Type, 0}, + {"WriteString", Func, 0}, + {"Writer", Type, 0}, + {"WriterAt", Type, 0}, + {"WriterTo", Type, 0}, + }, + "io/fs": { + {"(*PathError).Error", Method, 16}, + {"(*PathError).Timeout", Method, 16}, + {"(*PathError).Unwrap", Method, 16}, + {"(FileMode).IsDir", Method, 16}, + {"(FileMode).IsRegular", Method, 16}, + {"(FileMode).Perm", Method, 16}, + {"(FileMode).String", Method, 16}, + {"(FileMode).Type", Method, 16}, + {"DirEntry", Type, 16}, + {"ErrClosed", Var, 16}, + {"ErrExist", Var, 16}, + {"ErrInvalid", Var, 16}, + {"ErrNotExist", Var, 16}, + {"ErrPermission", Var, 16}, + {"FS", Type, 16}, + {"File", Type, 16}, + {"FileInfo", Type, 16}, + {"FileInfoToDirEntry", Func, 17}, + {"FileMode", Type, 16}, + {"FormatDirEntry", Func, 21}, + {"FormatFileInfo", Func, 21}, + {"Glob", Func, 16}, + {"GlobFS", Type, 16}, + {"ModeAppend", Const, 16}, + {"ModeCharDevice", Const, 16}, + {"ModeDevice", Const, 
16}, + {"ModeDir", Const, 16}, + {"ModeExclusive", Const, 16}, + {"ModeIrregular", Const, 16}, + {"ModeNamedPipe", Const, 16}, + {"ModePerm", Const, 16}, + {"ModeSetgid", Const, 16}, + {"ModeSetuid", Const, 16}, + {"ModeSocket", Const, 16}, + {"ModeSticky", Const, 16}, + {"ModeSymlink", Const, 16}, + {"ModeTemporary", Const, 16}, + {"ModeType", Const, 16}, + {"PathError", Type, 16}, + {"PathError.Err", Field, 16}, + {"PathError.Op", Field, 16}, + {"PathError.Path", Field, 16}, + {"ReadDir", Func, 16}, + {"ReadDirFS", Type, 16}, + {"ReadDirFile", Type, 16}, + {"ReadFile", Func, 16}, + {"ReadFileFS", Type, 16}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 16}, + {"Stat", Func, 16}, + {"StatFS", Type, 16}, + {"Sub", Func, 16}, + {"SubFS", Type, 16}, + {"ValidPath", Func, 16}, + {"WalkDir", Func, 16}, + {"WalkDirFunc", Type, 16}, + }, + "io/ioutil": { + {"Discard", Var, 0}, + {"NopCloser", Func, 0}, + {"ReadAll", Func, 0}, + {"ReadDir", Func, 0}, + {"ReadFile", Func, 0}, + {"TempDir", Func, 0}, + {"TempFile", Func, 0}, + {"WriteFile", Func, 0}, + }, + "log": { + {"(*Logger).Fatal", Method, 0}, + {"(*Logger).Fatalf", Method, 0}, + {"(*Logger).Fatalln", Method, 0}, + {"(*Logger).Flags", Method, 0}, + {"(*Logger).Output", Method, 0}, + {"(*Logger).Panic", Method, 0}, + {"(*Logger).Panicf", Method, 0}, + {"(*Logger).Panicln", Method, 0}, + {"(*Logger).Prefix", Method, 0}, + {"(*Logger).Print", Method, 0}, + {"(*Logger).Printf", Method, 0}, + {"(*Logger).Println", Method, 0}, + {"(*Logger).SetFlags", Method, 0}, + {"(*Logger).SetOutput", Method, 5}, + {"(*Logger).SetPrefix", Method, 0}, + {"(*Logger).Writer", Method, 12}, + {"Default", Func, 16}, + {"Fatal", Func, 0}, + {"Fatalf", Func, 0}, + {"Fatalln", Func, 0}, + {"Flags", Func, 0}, + {"LUTC", Const, 5}, + {"Ldate", Const, 0}, + {"Llongfile", Const, 0}, + {"Lmicroseconds", Const, 0}, + {"Lmsgprefix", Const, 14}, + {"Logger", Type, 0}, + {"Lshortfile", Const, 0}, + {"LstdFlags", Const, 0}, + {"Ltime", Const, 0}, + {"New", Func, 0}, + {"Output", Func, 5}, + {"Panic", Func, 0}, + {"Panicf", Func, 0}, + {"Panicln", Func, 0}, + {"Prefix", Func, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"SetFlags", Func, 0}, + {"SetOutput", Func, 0}, + {"SetPrefix", Func, 0}, + {"Writer", Func, 13}, + }, + "log/slog": { + {"(*JSONHandler).Enabled", Method, 21}, + {"(*JSONHandler).Handle", Method, 21}, + {"(*JSONHandler).WithAttrs", Method, 21}, + {"(*JSONHandler).WithGroup", Method, 21}, + {"(*Level).UnmarshalJSON", Method, 21}, + {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).Level", Method, 21}, + {"(*LevelVar).MarshalText", Method, 21}, + {"(*LevelVar).Set", Method, 21}, + {"(*LevelVar).String", Method, 21}, + {"(*LevelVar).UnmarshalText", Method, 21}, + {"(*Logger).Debug", Method, 21}, + {"(*Logger).DebugContext", Method, 21}, + {"(*Logger).Enabled", Method, 21}, + {"(*Logger).Error", Method, 21}, + {"(*Logger).ErrorContext", Method, 21}, + {"(*Logger).Handler", Method, 21}, + {"(*Logger).Info", Method, 21}, + {"(*Logger).InfoContext", Method, 21}, + {"(*Logger).Log", Method, 21}, + {"(*Logger).LogAttrs", Method, 21}, + {"(*Logger).Warn", Method, 21}, + {"(*Logger).WarnContext", Method, 21}, + {"(*Logger).With", Method, 21}, + {"(*Logger).WithGroup", Method, 21}, + {"(*Record).Add", Method, 21}, + {"(*Record).AddAttrs", Method, 21}, + {"(*TextHandler).Enabled", Method, 21}, + {"(*TextHandler).Handle", Method, 21}, + {"(*TextHandler).WithAttrs", Method, 21}, + {"(*TextHandler).WithGroup", Method, 21}, + {"(Attr).Equal", 
Method, 21}, + {"(Attr).String", Method, 21}, + {"(Kind).String", Method, 21}, + {"(Level).Level", Method, 21}, + {"(Level).MarshalJSON", Method, 21}, + {"(Level).MarshalText", Method, 21}, + {"(Level).String", Method, 21}, + {"(Record).Attrs", Method, 21}, + {"(Record).Clone", Method, 21}, + {"(Record).NumAttrs", Method, 21}, + {"(Value).Any", Method, 21}, + {"(Value).Bool", Method, 21}, + {"(Value).Duration", Method, 21}, + {"(Value).Equal", Method, 21}, + {"(Value).Float64", Method, 21}, + {"(Value).Group", Method, 21}, + {"(Value).Int64", Method, 21}, + {"(Value).Kind", Method, 21}, + {"(Value).LogValuer", Method, 21}, + {"(Value).Resolve", Method, 21}, + {"(Value).String", Method, 21}, + {"(Value).Time", Method, 21}, + {"(Value).Uint64", Method, 21}, + {"Any", Func, 21}, + {"AnyValue", Func, 21}, + {"Attr", Type, 21}, + {"Attr.Key", Field, 21}, + {"Attr.Value", Field, 21}, + {"Bool", Func, 21}, + {"BoolValue", Func, 21}, + {"Debug", Func, 21}, + {"DebugContext", Func, 21}, + {"Default", Func, 21}, + {"Duration", Func, 21}, + {"DurationValue", Func, 21}, + {"Error", Func, 21}, + {"ErrorContext", Func, 21}, + {"Float64", Func, 21}, + {"Float64Value", Func, 21}, + {"Group", Func, 21}, + {"GroupValue", Func, 21}, + {"Handler", Type, 21}, + {"HandlerOptions", Type, 21}, + {"HandlerOptions.AddSource", Field, 21}, + {"HandlerOptions.Level", Field, 21}, + {"HandlerOptions.ReplaceAttr", Field, 21}, + {"Info", Func, 21}, + {"InfoContext", Func, 21}, + {"Int", Func, 21}, + {"Int64", Func, 21}, + {"Int64Value", Func, 21}, + {"IntValue", Func, 21}, + {"JSONHandler", Type, 21}, + {"Kind", Type, 21}, + {"KindAny", Const, 21}, + {"KindBool", Const, 21}, + {"KindDuration", Const, 21}, + {"KindFloat64", Const, 21}, + {"KindGroup", Const, 21}, + {"KindInt64", Const, 21}, + {"KindLogValuer", Const, 21}, + {"KindString", Const, 21}, + {"KindTime", Const, 21}, + {"KindUint64", Const, 21}, + {"Level", Type, 21}, + {"LevelDebug", Const, 21}, + {"LevelError", Const, 21}, + {"LevelInfo", Const, 21}, + {"LevelKey", Const, 21}, + {"LevelVar", Type, 21}, + {"LevelWarn", Const, 21}, + {"Leveler", Type, 21}, + {"Log", Func, 21}, + {"LogAttrs", Func, 21}, + {"LogValuer", Type, 21}, + {"Logger", Type, 21}, + {"MessageKey", Const, 21}, + {"New", Func, 21}, + {"NewJSONHandler", Func, 21}, + {"NewLogLogger", Func, 21}, + {"NewRecord", Func, 21}, + {"NewTextHandler", Func, 21}, + {"Record", Type, 21}, + {"Record.Level", Field, 21}, + {"Record.Message", Field, 21}, + {"Record.PC", Field, 21}, + {"Record.Time", Field, 21}, + {"SetDefault", Func, 21}, + {"SetLogLoggerLevel", Func, 22}, + {"Source", Type, 21}, + {"Source.File", Field, 21}, + {"Source.Function", Field, 21}, + {"Source.Line", Field, 21}, + {"SourceKey", Const, 21}, + {"String", Func, 21}, + {"StringValue", Func, 21}, + {"TextHandler", Type, 21}, + {"Time", Func, 21}, + {"TimeKey", Const, 21}, + {"TimeValue", Func, 21}, + {"Uint64", Func, 21}, + {"Uint64Value", Func, 21}, + {"Value", Type, 21}, + {"Warn", Func, 21}, + {"WarnContext", Func, 21}, + {"With", Func, 21}, + }, + "log/syslog": { + {"(*Writer).Alert", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Crit", Method, 0}, + {"(*Writer).Debug", Method, 0}, + {"(*Writer).Emerg", Method, 0}, + {"(*Writer).Err", Method, 0}, + {"(*Writer).Info", Method, 0}, + {"(*Writer).Notice", Method, 0}, + {"(*Writer).Warning", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"Dial", Func, 0}, + {"LOG_ALERT", Const, 0}, + {"LOG_AUTH", Const, 1}, + {"LOG_AUTHPRIV", Const, 1}, + {"LOG_CRIT", Const, 0}, + 
{"LOG_CRON", Const, 1}, + {"LOG_DAEMON", Const, 1}, + {"LOG_DEBUG", Const, 0}, + {"LOG_EMERG", Const, 0}, + {"LOG_ERR", Const, 0}, + {"LOG_FTP", Const, 1}, + {"LOG_INFO", Const, 0}, + {"LOG_KERN", Const, 1}, + {"LOG_LOCAL0", Const, 1}, + {"LOG_LOCAL1", Const, 1}, + {"LOG_LOCAL2", Const, 1}, + {"LOG_LOCAL3", Const, 1}, + {"LOG_LOCAL4", Const, 1}, + {"LOG_LOCAL5", Const, 1}, + {"LOG_LOCAL6", Const, 1}, + {"LOG_LOCAL7", Const, 1}, + {"LOG_LPR", Const, 1}, + {"LOG_MAIL", Const, 1}, + {"LOG_NEWS", Const, 1}, + {"LOG_NOTICE", Const, 0}, + {"LOG_SYSLOG", Const, 1}, + {"LOG_USER", Const, 1}, + {"LOG_UUCP", Const, 1}, + {"LOG_WARNING", Const, 0}, + {"New", Func, 0}, + {"NewLogger", Func, 0}, + {"Priority", Type, 0}, + {"Writer", Type, 0}, + }, + "maps": { + {"Clone", Func, 21}, + {"Copy", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + }, + "math": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atan2", Func, 0}, + {"Atanh", Func, 0}, + {"Cbrt", Func, 0}, + {"Ceil", Func, 0}, + {"Copysign", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Dim", Func, 0}, + {"E", Const, 0}, + {"Erf", Func, 0}, + {"Erfc", Func, 0}, + {"Erfcinv", Func, 10}, + {"Erfinv", Func, 10}, + {"Exp", Func, 0}, + {"Exp2", Func, 0}, + {"Expm1", Func, 0}, + {"FMA", Func, 14}, + {"Float32bits", Func, 0}, + {"Float32frombits", Func, 0}, + {"Float64bits", Func, 0}, + {"Float64frombits", Func, 0}, + {"Floor", Func, 0}, + {"Frexp", Func, 0}, + {"Gamma", Func, 0}, + {"Hypot", Func, 0}, + {"Ilogb", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"J0", Func, 0}, + {"J1", Func, 0}, + {"Jn", Func, 0}, + {"Ldexp", Func, 0}, + {"Lgamma", Func, 0}, + {"Ln10", Const, 0}, + {"Ln2", Const, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"Log10E", Const, 0}, + {"Log1p", Func, 0}, + {"Log2", Func, 0}, + {"Log2E", Const, 0}, + {"Logb", Func, 0}, + {"Max", Func, 0}, + {"MaxFloat32", Const, 0}, + {"MaxFloat64", Const, 0}, + {"MaxInt", Const, 17}, + {"MaxInt16", Const, 0}, + {"MaxInt32", Const, 0}, + {"MaxInt64", Const, 0}, + {"MaxInt8", Const, 0}, + {"MaxUint", Const, 17}, + {"MaxUint16", Const, 0}, + {"MaxUint32", Const, 0}, + {"MaxUint64", Const, 0}, + {"MaxUint8", Const, 0}, + {"Min", Func, 0}, + {"MinInt", Const, 17}, + {"MinInt16", Const, 0}, + {"MinInt32", Const, 0}, + {"MinInt64", Const, 0}, + {"MinInt8", Const, 0}, + {"Mod", Func, 0}, + {"Modf", Func, 0}, + {"NaN", Func, 0}, + {"Nextafter", Func, 0}, + {"Nextafter32", Func, 4}, + {"Phi", Const, 0}, + {"Pi", Const, 0}, + {"Pow", Func, 0}, + {"Pow10", Func, 0}, + {"Remainder", Func, 0}, + {"Round", Func, 10}, + {"RoundToEven", Func, 10}, + {"Signbit", Func, 0}, + {"Sin", Func, 0}, + {"Sincos", Func, 0}, + {"Sinh", Func, 0}, + {"SmallestNonzeroFloat32", Const, 0}, + {"SmallestNonzeroFloat64", Const, 0}, + {"Sqrt", Func, 0}, + {"Sqrt2", Const, 0}, + {"SqrtE", Const, 0}, + {"SqrtPhi", Const, 0}, + {"SqrtPi", Const, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + {"Trunc", Func, 0}, + {"Y0", Func, 0}, + {"Y1", Func, 0}, + {"Yn", Func, 0}, + }, + "math/big": { + {"(*Float).Abs", Method, 5}, + {"(*Float).Acc", Method, 5}, + {"(*Float).Add", Method, 5}, + {"(*Float).Append", Method, 5}, + {"(*Float).Cmp", Method, 5}, + {"(*Float).Copy", Method, 5}, + {"(*Float).Float32", Method, 5}, + {"(*Float).Float64", Method, 5}, + {"(*Float).Format", Method, 5}, + {"(*Float).GobDecode", Method, 7}, + {"(*Float).GobEncode", Method, 7}, + {"(*Float).Int", 
Method, 5}, + {"(*Float).Int64", Method, 5}, + {"(*Float).IsInf", Method, 5}, + {"(*Float).IsInt", Method, 5}, + {"(*Float).MantExp", Method, 5}, + {"(*Float).MarshalText", Method, 6}, + {"(*Float).MinPrec", Method, 5}, + {"(*Float).Mode", Method, 5}, + {"(*Float).Mul", Method, 5}, + {"(*Float).Neg", Method, 5}, + {"(*Float).Parse", Method, 5}, + {"(*Float).Prec", Method, 5}, + {"(*Float).Quo", Method, 5}, + {"(*Float).Rat", Method, 5}, + {"(*Float).Scan", Method, 8}, + {"(*Float).Set", Method, 5}, + {"(*Float).SetFloat64", Method, 5}, + {"(*Float).SetInf", Method, 5}, + {"(*Float).SetInt", Method, 5}, + {"(*Float).SetInt64", Method, 5}, + {"(*Float).SetMantExp", Method, 5}, + {"(*Float).SetMode", Method, 5}, + {"(*Float).SetPrec", Method, 5}, + {"(*Float).SetRat", Method, 5}, + {"(*Float).SetString", Method, 5}, + {"(*Float).SetUint64", Method, 5}, + {"(*Float).Sign", Method, 5}, + {"(*Float).Signbit", Method, 5}, + {"(*Float).Sqrt", Method, 10}, + {"(*Float).String", Method, 5}, + {"(*Float).Sub", Method, 5}, + {"(*Float).Text", Method, 5}, + {"(*Float).Uint64", Method, 5}, + {"(*Float).UnmarshalText", Method, 6}, + {"(*Int).Abs", Method, 0}, + {"(*Int).Add", Method, 0}, + {"(*Int).And", Method, 0}, + {"(*Int).AndNot", Method, 0}, + {"(*Int).Append", Method, 6}, + {"(*Int).Binomial", Method, 0}, + {"(*Int).Bit", Method, 0}, + {"(*Int).BitLen", Method, 0}, + {"(*Int).Bits", Method, 0}, + {"(*Int).Bytes", Method, 0}, + {"(*Int).Cmp", Method, 0}, + {"(*Int).CmpAbs", Method, 10}, + {"(*Int).Div", Method, 0}, + {"(*Int).DivMod", Method, 0}, + {"(*Int).Exp", Method, 0}, + {"(*Int).FillBytes", Method, 15}, + {"(*Int).Float64", Method, 21}, + {"(*Int).Format", Method, 0}, + {"(*Int).GCD", Method, 0}, + {"(*Int).GobDecode", Method, 0}, + {"(*Int).GobEncode", Method, 0}, + {"(*Int).Int64", Method, 0}, + {"(*Int).IsInt64", Method, 9}, + {"(*Int).IsUint64", Method, 9}, + {"(*Int).Lsh", Method, 0}, + {"(*Int).MarshalJSON", Method, 1}, + {"(*Int).MarshalText", Method, 3}, + {"(*Int).Mod", Method, 0}, + {"(*Int).ModInverse", Method, 0}, + {"(*Int).ModSqrt", Method, 5}, + {"(*Int).Mul", Method, 0}, + {"(*Int).MulRange", Method, 0}, + {"(*Int).Neg", Method, 0}, + {"(*Int).Not", Method, 0}, + {"(*Int).Or", Method, 0}, + {"(*Int).ProbablyPrime", Method, 0}, + {"(*Int).Quo", Method, 0}, + {"(*Int).QuoRem", Method, 0}, + {"(*Int).Rand", Method, 0}, + {"(*Int).Rem", Method, 0}, + {"(*Int).Rsh", Method, 0}, + {"(*Int).Scan", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).SetBit", Method, 0}, + {"(*Int).SetBits", Method, 0}, + {"(*Int).SetBytes", Method, 0}, + {"(*Int).SetInt64", Method, 0}, + {"(*Int).SetString", Method, 0}, + {"(*Int).SetUint64", Method, 1}, + {"(*Int).Sign", Method, 0}, + {"(*Int).Sqrt", Method, 8}, + {"(*Int).String", Method, 0}, + {"(*Int).Sub", Method, 0}, + {"(*Int).Text", Method, 6}, + {"(*Int).TrailingZeroBits", Method, 13}, + {"(*Int).Uint64", Method, 1}, + {"(*Int).UnmarshalJSON", Method, 1}, + {"(*Int).UnmarshalText", Method, 3}, + {"(*Int).Xor", Method, 0}, + {"(*Rat).Abs", Method, 0}, + {"(*Rat).Add", Method, 0}, + {"(*Rat).Cmp", Method, 0}, + {"(*Rat).Denom", Method, 0}, + {"(*Rat).Float32", Method, 4}, + {"(*Rat).Float64", Method, 1}, + {"(*Rat).FloatPrec", Method, 22}, + {"(*Rat).FloatString", Method, 0}, + {"(*Rat).GobDecode", Method, 0}, + {"(*Rat).GobEncode", Method, 0}, + {"(*Rat).Inv", Method, 0}, + {"(*Rat).IsInt", Method, 0}, + {"(*Rat).MarshalText", Method, 3}, + {"(*Rat).Mul", Method, 0}, + {"(*Rat).Neg", Method, 0}, + {"(*Rat).Num", Method, 0}, + {"(*Rat).Quo", 
Method, 0}, + {"(*Rat).RatString", Method, 0}, + {"(*Rat).Scan", Method, 0}, + {"(*Rat).Set", Method, 0}, + {"(*Rat).SetFloat64", Method, 1}, + {"(*Rat).SetFrac", Method, 0}, + {"(*Rat).SetFrac64", Method, 0}, + {"(*Rat).SetInt", Method, 0}, + {"(*Rat).SetInt64", Method, 0}, + {"(*Rat).SetString", Method, 0}, + {"(*Rat).SetUint64", Method, 13}, + {"(*Rat).Sign", Method, 0}, + {"(*Rat).String", Method, 0}, + {"(*Rat).Sub", Method, 0}, + {"(*Rat).UnmarshalText", Method, 3}, + {"(Accuracy).String", Method, 5}, + {"(ErrNaN).Error", Method, 5}, + {"(RoundingMode).String", Method, 5}, + {"Above", Const, 5}, + {"Accuracy", Type, 5}, + {"AwayFromZero", Const, 5}, + {"Below", Const, 5}, + {"ErrNaN", Type, 5}, + {"Exact", Const, 5}, + {"Float", Type, 5}, + {"Int", Type, 0}, + {"Jacobi", Func, 5}, + {"MaxBase", Const, 0}, + {"MaxExp", Const, 5}, + {"MaxPrec", Const, 5}, + {"MinExp", Const, 5}, + {"NewFloat", Func, 5}, + {"NewInt", Func, 0}, + {"NewRat", Func, 0}, + {"ParseFloat", Func, 5}, + {"Rat", Type, 0}, + {"RoundingMode", Type, 5}, + {"ToNearestAway", Const, 5}, + {"ToNearestEven", Const, 5}, + {"ToNegativeInf", Const, 5}, + {"ToPositiveInf", Const, 5}, + {"ToZero", Const, 5}, + {"Word", Type, 0}, + }, + "math/bits": { + {"Add", Func, 12}, + {"Add32", Func, 12}, + {"Add64", Func, 12}, + {"Div", Func, 12}, + {"Div32", Func, 12}, + {"Div64", Func, 12}, + {"LeadingZeros", Func, 9}, + {"LeadingZeros16", Func, 9}, + {"LeadingZeros32", Func, 9}, + {"LeadingZeros64", Func, 9}, + {"LeadingZeros8", Func, 9}, + {"Len", Func, 9}, + {"Len16", Func, 9}, + {"Len32", Func, 9}, + {"Len64", Func, 9}, + {"Len8", Func, 9}, + {"Mul", Func, 12}, + {"Mul32", Func, 12}, + {"Mul64", Func, 12}, + {"OnesCount", Func, 9}, + {"OnesCount16", Func, 9}, + {"OnesCount32", Func, 9}, + {"OnesCount64", Func, 9}, + {"OnesCount8", Func, 9}, + {"Rem", Func, 14}, + {"Rem32", Func, 14}, + {"Rem64", Func, 14}, + {"Reverse", Func, 9}, + {"Reverse16", Func, 9}, + {"Reverse32", Func, 9}, + {"Reverse64", Func, 9}, + {"Reverse8", Func, 9}, + {"ReverseBytes", Func, 9}, + {"ReverseBytes16", Func, 9}, + {"ReverseBytes32", Func, 9}, + {"ReverseBytes64", Func, 9}, + {"RotateLeft", Func, 9}, + {"RotateLeft16", Func, 9}, + {"RotateLeft32", Func, 9}, + {"RotateLeft64", Func, 9}, + {"RotateLeft8", Func, 9}, + {"Sub", Func, 12}, + {"Sub32", Func, 12}, + {"Sub64", Func, 12}, + {"TrailingZeros", Func, 9}, + {"TrailingZeros16", Func, 9}, + {"TrailingZeros32", Func, 9}, + {"TrailingZeros64", Func, 9}, + {"TrailingZeros8", Func, 9}, + {"UintSize", Const, 9}, + }, + "math/cmplx": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atanh", Func, 0}, + {"Conj", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Cot", Func, 0}, + {"Exp", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"NaN", Func, 0}, + {"Phase", Func, 0}, + {"Polar", Func, 0}, + {"Pow", Func, 0}, + {"Rect", Func, 0}, + {"Sin", Func, 0}, + {"Sinh", Func, 0}, + {"Sqrt", Func, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + }, + "math/rand": { + {"(*Rand).ExpFloat64", Method, 0}, + {"(*Rand).Float32", Method, 0}, + {"(*Rand).Float64", Method, 0}, + {"(*Rand).Int", Method, 0}, + {"(*Rand).Int31", Method, 0}, + {"(*Rand).Int31n", Method, 0}, + {"(*Rand).Int63", Method, 0}, + {"(*Rand).Int63n", Method, 0}, + {"(*Rand).Intn", Method, 0}, + {"(*Rand).NormFloat64", Method, 0}, + {"(*Rand).Perm", Method, 0}, + {"(*Rand).Read", Method, 6}, + 
{"(*Rand).Seed", Method, 0}, + {"(*Rand).Shuffle", Method, 10}, + {"(*Rand).Uint32", Method, 0}, + {"(*Rand).Uint64", Method, 8}, + {"(*Zipf).Uint64", Method, 0}, + {"ExpFloat64", Func, 0}, + {"Float32", Func, 0}, + {"Float64", Func, 0}, + {"Int", Func, 0}, + {"Int31", Func, 0}, + {"Int31n", Func, 0}, + {"Int63", Func, 0}, + {"Int63n", Func, 0}, + {"Intn", Func, 0}, + {"New", Func, 0}, + {"NewSource", Func, 0}, + {"NewZipf", Func, 0}, + {"NormFloat64", Func, 0}, + {"Perm", Func, 0}, + {"Rand", Type, 0}, + {"Read", Func, 6}, + {"Seed", Func, 0}, + {"Shuffle", Func, 10}, + {"Source", Type, 0}, + {"Source64", Type, 8}, + {"Uint32", Func, 0}, + {"Uint64", Func, 8}, + {"Zipf", Type, 0}, + }, + "math/rand/v2": { + {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Seed", Method, 22}, + {"(*ChaCha8).Uint64", Method, 22}, + {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).MarshalBinary", Method, 22}, + {"(*PCG).Seed", Method, 22}, + {"(*PCG).Uint64", Method, 22}, + {"(*PCG).UnmarshalBinary", Method, 22}, + {"(*Rand).ExpFloat64", Method, 22}, + {"(*Rand).Float32", Method, 22}, + {"(*Rand).Float64", Method, 22}, + {"(*Rand).Int", Method, 22}, + {"(*Rand).Int32", Method, 22}, + {"(*Rand).Int32N", Method, 22}, + {"(*Rand).Int64", Method, 22}, + {"(*Rand).Int64N", Method, 22}, + {"(*Rand).IntN", Method, 22}, + {"(*Rand).NormFloat64", Method, 22}, + {"(*Rand).Perm", Method, 22}, + {"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint32", Method, 22}, + {"(*Rand).Uint32N", Method, 22}, + {"(*Rand).Uint64", Method, 22}, + {"(*Rand).Uint64N", Method, 22}, + {"(*Rand).UintN", Method, 22}, + {"(*Zipf).Uint64", Method, 22}, + {"ChaCha8", Type, 22}, + {"ExpFloat64", Func, 22}, + {"Float32", Func, 22}, + {"Float64", Func, 22}, + {"Int", Func, 22}, + {"Int32", Func, 22}, + {"Int32N", Func, 22}, + {"Int64", Func, 22}, + {"Int64N", Func, 22}, + {"IntN", Func, 22}, + {"N", Func, 22}, + {"New", Func, 22}, + {"NewChaCha8", Func, 22}, + {"NewPCG", Func, 22}, + {"NewZipf", Func, 22}, + {"NormFloat64", Func, 22}, + {"PCG", Type, 22}, + {"Perm", Func, 22}, + {"Rand", Type, 22}, + {"Shuffle", Func, 22}, + {"Source", Type, 22}, + {"Uint32", Func, 22}, + {"Uint32N", Func, 22}, + {"Uint64", Func, 22}, + {"Uint64N", Func, 22}, + {"UintN", Func, 22}, + {"Zipf", Type, 22}, + }, + "mime": { + {"(*WordDecoder).Decode", Method, 5}, + {"(*WordDecoder).DecodeHeader", Method, 5}, + {"(WordEncoder).Encode", Method, 5}, + {"AddExtensionType", Func, 0}, + {"BEncoding", Const, 5}, + {"ErrInvalidMediaParameter", Var, 9}, + {"ExtensionsByType", Func, 5}, + {"FormatMediaType", Func, 0}, + {"ParseMediaType", Func, 0}, + {"QEncoding", Const, 5}, + {"TypeByExtension", Func, 0}, + {"WordDecoder", Type, 5}, + {"WordDecoder.CharsetReader", Field, 5}, + {"WordEncoder", Type, 5}, + }, + "mime/multipart": { + {"(*FileHeader).Open", Method, 0}, + {"(*Form).RemoveAll", Method, 0}, + {"(*Part).Close", Method, 0}, + {"(*Part).FileName", Method, 0}, + {"(*Part).FormName", Method, 0}, + {"(*Part).Read", Method, 0}, + {"(*Reader).NextPart", Method, 0}, + {"(*Reader).NextRawPart", Method, 14}, + {"(*Reader).ReadForm", Method, 0}, + {"(*Writer).Boundary", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).CreateFormField", Method, 0}, + {"(*Writer).CreateFormFile", Method, 0}, + {"(*Writer).CreatePart", Method, 0}, + {"(*Writer).FormDataContentType", Method, 0}, + {"(*Writer).SetBoundary", Method, 1}, + {"(*Writer).WriteField", Method, 0}, + {"ErrMessageTooLarge", Var, 9}, + {"File", Type, 0}, + {"FileHeader", Type, 0}, + 
{"FileHeader.Filename", Field, 0}, + {"FileHeader.Header", Field, 0}, + {"FileHeader.Size", Field, 9}, + {"Form", Type, 0}, + {"Form.File", Field, 0}, + {"Form.Value", Field, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Part", Type, 0}, + {"Part.Header", Field, 0}, + {"Reader", Type, 0}, + {"Writer", Type, 0}, + }, + "mime/quotedprintable": { + {"(*Reader).Read", Method, 5}, + {"(*Writer).Close", Method, 5}, + {"(*Writer).Write", Method, 5}, + {"NewReader", Func, 5}, + {"NewWriter", Func, 5}, + {"Reader", Type, 5}, + {"Writer", Type, 5}, + {"Writer.Binary", Field, 5}, + }, + "net": { + {"(*AddrError).Error", Method, 0}, + {"(*AddrError).Temporary", Method, 0}, + {"(*AddrError).Timeout", Method, 0}, + {"(*Buffers).Read", Method, 8}, + {"(*Buffers).WriteTo", Method, 8}, + {"(*DNSConfigError).Error", Method, 0}, + {"(*DNSConfigError).Temporary", Method, 0}, + {"(*DNSConfigError).Timeout", Method, 0}, + {"(*DNSConfigError).Unwrap", Method, 13}, + {"(*DNSError).Error", Method, 0}, + {"(*DNSError).Temporary", Method, 0}, + {"(*DNSError).Timeout", Method, 0}, + {"(*Dialer).Dial", Method, 1}, + {"(*Dialer).DialContext", Method, 7}, + {"(*Dialer).MultipathTCP", Method, 21}, + {"(*Dialer).SetMultipathTCP", Method, 21}, + {"(*IP).UnmarshalText", Method, 2}, + {"(*IPAddr).Network", Method, 0}, + {"(*IPAddr).String", Method, 0}, + {"(*IPConn).Close", Method, 0}, + {"(*IPConn).File", Method, 0}, + {"(*IPConn).LocalAddr", Method, 0}, + {"(*IPConn).Read", Method, 0}, + {"(*IPConn).ReadFrom", Method, 0}, + {"(*IPConn).ReadFromIP", Method, 0}, + {"(*IPConn).ReadMsgIP", Method, 1}, + {"(*IPConn).RemoteAddr", Method, 0}, + {"(*IPConn).SetDeadline", Method, 0}, + {"(*IPConn).SetReadBuffer", Method, 0}, + {"(*IPConn).SetReadDeadline", Method, 0}, + {"(*IPConn).SetWriteBuffer", Method, 0}, + {"(*IPConn).SetWriteDeadline", Method, 0}, + {"(*IPConn).SyscallConn", Method, 9}, + {"(*IPConn).Write", Method, 0}, + {"(*IPConn).WriteMsgIP", Method, 1}, + {"(*IPConn).WriteTo", Method, 0}, + {"(*IPConn).WriteToIP", Method, 0}, + {"(*IPNet).Contains", Method, 0}, + {"(*IPNet).Network", Method, 0}, + {"(*IPNet).String", Method, 0}, + {"(*Interface).Addrs", Method, 0}, + {"(*Interface).MulticastAddrs", Method, 0}, + {"(*ListenConfig).Listen", Method, 11}, + {"(*ListenConfig).ListenPacket", Method, 11}, + {"(*ListenConfig).MultipathTCP", Method, 21}, + {"(*ListenConfig).SetMultipathTCP", Method, 21}, + {"(*OpError).Error", Method, 0}, + {"(*OpError).Temporary", Method, 0}, + {"(*OpError).Timeout", Method, 0}, + {"(*OpError).Unwrap", Method, 13}, + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Temporary", Method, 17}, + {"(*ParseError).Timeout", Method, 17}, + {"(*Resolver).LookupAddr", Method, 8}, + {"(*Resolver).LookupCNAME", Method, 8}, + {"(*Resolver).LookupHost", Method, 8}, + {"(*Resolver).LookupIP", Method, 15}, + {"(*Resolver).LookupIPAddr", Method, 8}, + {"(*Resolver).LookupMX", Method, 8}, + {"(*Resolver).LookupNS", Method, 8}, + {"(*Resolver).LookupNetIP", Method, 18}, + {"(*Resolver).LookupPort", Method, 8}, + {"(*Resolver).LookupSRV", Method, 8}, + {"(*Resolver).LookupTXT", Method, 8}, + {"(*TCPAddr).AddrPort", Method, 18}, + {"(*TCPAddr).Network", Method, 0}, + {"(*TCPAddr).String", Method, 0}, + {"(*TCPConn).Close", Method, 0}, + {"(*TCPConn).CloseRead", Method, 0}, + {"(*TCPConn).CloseWrite", Method, 0}, + {"(*TCPConn).File", Method, 0}, + {"(*TCPConn).LocalAddr", Method, 0}, + {"(*TCPConn).MultipathTCP", Method, 21}, + {"(*TCPConn).Read", Method, 0}, + {"(*TCPConn).ReadFrom", Method, 0}, 
+ {"(*TCPConn).RemoteAddr", Method, 0}, + {"(*TCPConn).SetDeadline", Method, 0}, + {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, + {"(*TCPConn).SetLinger", Method, 0}, + {"(*TCPConn).SetNoDelay", Method, 0}, + {"(*TCPConn).SetReadBuffer", Method, 0}, + {"(*TCPConn).SetReadDeadline", Method, 0}, + {"(*TCPConn).SetWriteBuffer", Method, 0}, + {"(*TCPConn).SetWriteDeadline", Method, 0}, + {"(*TCPConn).SyscallConn", Method, 9}, + {"(*TCPConn).Write", Method, 0}, + {"(*TCPConn).WriteTo", Method, 22}, + {"(*TCPListener).Accept", Method, 0}, + {"(*TCPListener).AcceptTCP", Method, 0}, + {"(*TCPListener).Addr", Method, 0}, + {"(*TCPListener).Close", Method, 0}, + {"(*TCPListener).File", Method, 0}, + {"(*TCPListener).SetDeadline", Method, 0}, + {"(*TCPListener).SyscallConn", Method, 10}, + {"(*UDPAddr).AddrPort", Method, 18}, + {"(*UDPAddr).Network", Method, 0}, + {"(*UDPAddr).String", Method, 0}, + {"(*UDPConn).Close", Method, 0}, + {"(*UDPConn).File", Method, 0}, + {"(*UDPConn).LocalAddr", Method, 0}, + {"(*UDPConn).Read", Method, 0}, + {"(*UDPConn).ReadFrom", Method, 0}, + {"(*UDPConn).ReadFromUDP", Method, 0}, + {"(*UDPConn).ReadFromUDPAddrPort", Method, 18}, + {"(*UDPConn).ReadMsgUDP", Method, 1}, + {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).RemoteAddr", Method, 0}, + {"(*UDPConn).SetDeadline", Method, 0}, + {"(*UDPConn).SetReadBuffer", Method, 0}, + {"(*UDPConn).SetReadDeadline", Method, 0}, + {"(*UDPConn).SetWriteBuffer", Method, 0}, + {"(*UDPConn).SetWriteDeadline", Method, 0}, + {"(*UDPConn).SyscallConn", Method, 9}, + {"(*UDPConn).Write", Method, 0}, + {"(*UDPConn).WriteMsgUDP", Method, 1}, + {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).WriteTo", Method, 0}, + {"(*UDPConn).WriteToUDP", Method, 0}, + {"(*UDPConn).WriteToUDPAddrPort", Method, 18}, + {"(*UnixAddr).Network", Method, 0}, + {"(*UnixAddr).String", Method, 0}, + {"(*UnixConn).Close", Method, 0}, + {"(*UnixConn).CloseRead", Method, 1}, + {"(*UnixConn).CloseWrite", Method, 1}, + {"(*UnixConn).File", Method, 0}, + {"(*UnixConn).LocalAddr", Method, 0}, + {"(*UnixConn).Read", Method, 0}, + {"(*UnixConn).ReadFrom", Method, 0}, + {"(*UnixConn).ReadFromUnix", Method, 0}, + {"(*UnixConn).ReadMsgUnix", Method, 0}, + {"(*UnixConn).RemoteAddr", Method, 0}, + {"(*UnixConn).SetDeadline", Method, 0}, + {"(*UnixConn).SetReadBuffer", Method, 0}, + {"(*UnixConn).SetReadDeadline", Method, 0}, + {"(*UnixConn).SetWriteBuffer", Method, 0}, + {"(*UnixConn).SetWriteDeadline", Method, 0}, + {"(*UnixConn).SyscallConn", Method, 9}, + {"(*UnixConn).Write", Method, 0}, + {"(*UnixConn).WriteMsgUnix", Method, 0}, + {"(*UnixConn).WriteTo", Method, 0}, + {"(*UnixConn).WriteToUnix", Method, 0}, + {"(*UnixListener).Accept", Method, 0}, + {"(*UnixListener).AcceptUnix", Method, 0}, + {"(*UnixListener).Addr", Method, 0}, + {"(*UnixListener).Close", Method, 0}, + {"(*UnixListener).File", Method, 0}, + {"(*UnixListener).SetDeadline", Method, 0}, + {"(*UnixListener).SetUnlinkOnClose", Method, 8}, + {"(*UnixListener).SyscallConn", Method, 10}, + {"(Flags).String", Method, 0}, + {"(HardwareAddr).String", Method, 0}, + {"(IP).DefaultMask", Method, 0}, + {"(IP).Equal", Method, 0}, + {"(IP).IsGlobalUnicast", Method, 0}, + {"(IP).IsInterfaceLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalUnicast", Method, 0}, + {"(IP).IsLoopback", Method, 0}, + {"(IP).IsMulticast", Method, 0}, + {"(IP).IsPrivate", Method, 17}, + {"(IP).IsUnspecified", Method, 0}, + 
{"(IP).MarshalText", Method, 2}, + {"(IP).Mask", Method, 0}, + {"(IP).String", Method, 0}, + {"(IP).To16", Method, 0}, + {"(IP).To4", Method, 0}, + {"(IPMask).Size", Method, 0}, + {"(IPMask).String", Method, 0}, + {"(InvalidAddrError).Error", Method, 0}, + {"(InvalidAddrError).Temporary", Method, 0}, + {"(InvalidAddrError).Timeout", Method, 0}, + {"(UnknownNetworkError).Error", Method, 0}, + {"(UnknownNetworkError).Temporary", Method, 0}, + {"(UnknownNetworkError).Timeout", Method, 0}, + {"Addr", Type, 0}, + {"AddrError", Type, 0}, + {"AddrError.Addr", Field, 0}, + {"AddrError.Err", Field, 0}, + {"Buffers", Type, 8}, + {"CIDRMask", Func, 0}, + {"Conn", Type, 0}, + {"DNSConfigError", Type, 0}, + {"DNSConfigError.Err", Field, 0}, + {"DNSError", Type, 0}, + {"DNSError.Err", Field, 0}, + {"DNSError.IsNotFound", Field, 13}, + {"DNSError.IsTemporary", Field, 6}, + {"DNSError.IsTimeout", Field, 0}, + {"DNSError.Name", Field, 0}, + {"DNSError.Server", Field, 0}, + {"DefaultResolver", Var, 8}, + {"Dial", Func, 0}, + {"DialIP", Func, 0}, + {"DialTCP", Func, 0}, + {"DialTimeout", Func, 0}, + {"DialUDP", Func, 0}, + {"DialUnix", Func, 0}, + {"Dialer", Type, 1}, + {"Dialer.Cancel", Field, 6}, + {"Dialer.Control", Field, 11}, + {"Dialer.ControlContext", Field, 20}, + {"Dialer.Deadline", Field, 1}, + {"Dialer.DualStack", Field, 2}, + {"Dialer.FallbackDelay", Field, 5}, + {"Dialer.KeepAlive", Field, 3}, + {"Dialer.LocalAddr", Field, 1}, + {"Dialer.Resolver", Field, 8}, + {"Dialer.Timeout", Field, 1}, + {"ErrClosed", Var, 16}, + {"ErrWriteToConnected", Var, 0}, + {"Error", Type, 0}, + {"FileConn", Func, 0}, + {"FileListener", Func, 0}, + {"FilePacketConn", Func, 0}, + {"FlagBroadcast", Const, 0}, + {"FlagLoopback", Const, 0}, + {"FlagMulticast", Const, 0}, + {"FlagPointToPoint", Const, 0}, + {"FlagRunning", Const, 20}, + {"FlagUp", Const, 0}, + {"Flags", Type, 0}, + {"HardwareAddr", Type, 0}, + {"IP", Type, 0}, + {"IPAddr", Type, 0}, + {"IPAddr.IP", Field, 0}, + {"IPAddr.Zone", Field, 1}, + {"IPConn", Type, 0}, + {"IPMask", Type, 0}, + {"IPNet", Type, 0}, + {"IPNet.IP", Field, 0}, + {"IPNet.Mask", Field, 0}, + {"IPv4", Func, 0}, + {"IPv4Mask", Func, 0}, + {"IPv4allrouter", Var, 0}, + {"IPv4allsys", Var, 0}, + {"IPv4bcast", Var, 0}, + {"IPv4len", Const, 0}, + {"IPv4zero", Var, 0}, + {"IPv6interfacelocalallnodes", Var, 0}, + {"IPv6len", Const, 0}, + {"IPv6linklocalallnodes", Var, 0}, + {"IPv6linklocalallrouters", Var, 0}, + {"IPv6loopback", Var, 0}, + {"IPv6unspecified", Var, 0}, + {"IPv6zero", Var, 0}, + {"Interface", Type, 0}, + {"Interface.Flags", Field, 0}, + {"Interface.HardwareAddr", Field, 0}, + {"Interface.Index", Field, 0}, + {"Interface.MTU", Field, 0}, + {"Interface.Name", Field, 0}, + {"InterfaceAddrs", Func, 0}, + {"InterfaceByIndex", Func, 0}, + {"InterfaceByName", Func, 0}, + {"Interfaces", Func, 0}, + {"InvalidAddrError", Type, 0}, + {"JoinHostPort", Func, 0}, + {"Listen", Func, 0}, + {"ListenConfig", Type, 11}, + {"ListenConfig.Control", Field, 11}, + {"ListenConfig.KeepAlive", Field, 13}, + {"ListenIP", Func, 0}, + {"ListenMulticastUDP", Func, 0}, + {"ListenPacket", Func, 0}, + {"ListenTCP", Func, 0}, + {"ListenUDP", Func, 0}, + {"ListenUnix", Func, 0}, + {"ListenUnixgram", Func, 0}, + {"Listener", Type, 0}, + {"LookupAddr", Func, 0}, + {"LookupCNAME", Func, 0}, + {"LookupHost", Func, 0}, + {"LookupIP", Func, 0}, + {"LookupMX", Func, 0}, + {"LookupNS", Func, 1}, + {"LookupPort", Func, 0}, + {"LookupSRV", Func, 0}, + {"LookupTXT", Func, 0}, + {"MX", Type, 0}, + {"MX.Host", Field, 0}, + 
{"MX.Pref", Field, 0}, + {"NS", Type, 1}, + {"NS.Host", Field, 1}, + {"OpError", Type, 0}, + {"OpError.Addr", Field, 0}, + {"OpError.Err", Field, 0}, + {"OpError.Net", Field, 0}, + {"OpError.Op", Field, 0}, + {"OpError.Source", Field, 5}, + {"PacketConn", Type, 0}, + {"ParseCIDR", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Text", Field, 0}, + {"ParseError.Type", Field, 0}, + {"ParseIP", Func, 0}, + {"ParseMAC", Func, 0}, + {"Pipe", Func, 0}, + {"ResolveIPAddr", Func, 0}, + {"ResolveTCPAddr", Func, 0}, + {"ResolveUDPAddr", Func, 0}, + {"ResolveUnixAddr", Func, 0}, + {"Resolver", Type, 8}, + {"Resolver.Dial", Field, 9}, + {"Resolver.PreferGo", Field, 8}, + {"Resolver.StrictErrors", Field, 9}, + {"SRV", Type, 0}, + {"SRV.Port", Field, 0}, + {"SRV.Priority", Field, 0}, + {"SRV.Target", Field, 0}, + {"SRV.Weight", Field, 0}, + {"SplitHostPort", Func, 0}, + {"TCPAddr", Type, 0}, + {"TCPAddr.IP", Field, 0}, + {"TCPAddr.Port", Field, 0}, + {"TCPAddr.Zone", Field, 1}, + {"TCPAddrFromAddrPort", Func, 18}, + {"TCPConn", Type, 0}, + {"TCPListener", Type, 0}, + {"UDPAddr", Type, 0}, + {"UDPAddr.IP", Field, 0}, + {"UDPAddr.Port", Field, 0}, + {"UDPAddr.Zone", Field, 1}, + {"UDPAddrFromAddrPort", Func, 18}, + {"UDPConn", Type, 0}, + {"UnixAddr", Type, 0}, + {"UnixAddr.Name", Field, 0}, + {"UnixAddr.Net", Field, 0}, + {"UnixConn", Type, 0}, + {"UnixListener", Type, 0}, + {"UnknownNetworkError", Type, 0}, + }, + "net/http": { + {"(*Client).CloseIdleConnections", Method, 12}, + {"(*Client).Do", Method, 0}, + {"(*Client).Get", Method, 0}, + {"(*Client).Head", Method, 0}, + {"(*Client).Post", Method, 0}, + {"(*Client).PostForm", Method, 0}, + {"(*Cookie).String", Method, 0}, + {"(*Cookie).Valid", Method, 18}, + {"(*MaxBytesError).Error", Method, 19}, + {"(*ProtocolError).Error", Method, 0}, + {"(*ProtocolError).Is", Method, 21}, + {"(*Request).AddCookie", Method, 0}, + {"(*Request).BasicAuth", Method, 4}, + {"(*Request).Clone", Method, 13}, + {"(*Request).Context", Method, 7}, + {"(*Request).Cookie", Method, 0}, + {"(*Request).Cookies", Method, 0}, + {"(*Request).FormFile", Method, 0}, + {"(*Request).FormValue", Method, 0}, + {"(*Request).MultipartReader", Method, 0}, + {"(*Request).ParseForm", Method, 0}, + {"(*Request).ParseMultipartForm", Method, 0}, + {"(*Request).PathValue", Method, 22}, + {"(*Request).PostFormValue", Method, 1}, + {"(*Request).ProtoAtLeast", Method, 0}, + {"(*Request).Referer", Method, 0}, + {"(*Request).SetBasicAuth", Method, 0}, + {"(*Request).SetPathValue", Method, 22}, + {"(*Request).UserAgent", Method, 0}, + {"(*Request).WithContext", Method, 7}, + {"(*Request).Write", Method, 0}, + {"(*Request).WriteProxy", Method, 0}, + {"(*Response).Cookies", Method, 0}, + {"(*Response).Location", Method, 0}, + {"(*Response).ProtoAtLeast", Method, 0}, + {"(*Response).Write", Method, 0}, + {"(*ResponseController).EnableFullDuplex", Method, 21}, + {"(*ResponseController).Flush", Method, 20}, + {"(*ResponseController).Hijack", Method, 20}, + {"(*ResponseController).SetReadDeadline", Method, 20}, + {"(*ResponseController).SetWriteDeadline", Method, 20}, + {"(*ServeMux).Handle", Method, 0}, + {"(*ServeMux).HandleFunc", Method, 0}, + {"(*ServeMux).Handler", Method, 1}, + {"(*ServeMux).ServeHTTP", Method, 0}, + {"(*Server).Close", Method, 8}, + {"(*Server).ListenAndServe", Method, 0}, + {"(*Server).ListenAndServeTLS", Method, 0}, + {"(*Server).RegisterOnShutdown", Method, 9}, + {"(*Server).Serve", Method, 0}, + {"(*Server).ServeTLS", Method, 9}, + {"(*Server).SetKeepAlivesEnabled", Method, 3}, 
+ {"(*Server).Shutdown", Method, 8}, + {"(*Transport).CancelRequest", Method, 1}, + {"(*Transport).Clone", Method, 13}, + {"(*Transport).CloseIdleConnections", Method, 0}, + {"(*Transport).RegisterProtocol", Method, 0}, + {"(*Transport).RoundTrip", Method, 0}, + {"(ConnState).String", Method, 3}, + {"(Dir).Open", Method, 0}, + {"(HandlerFunc).ServeHTTP", Method, 0}, + {"(Header).Add", Method, 0}, + {"(Header).Clone", Method, 13}, + {"(Header).Del", Method, 0}, + {"(Header).Get", Method, 0}, + {"(Header).Set", Method, 0}, + {"(Header).Values", Method, 14}, + {"(Header).Write", Method, 0}, + {"(Header).WriteSubset", Method, 0}, + {"AllowQuerySemicolons", Func, 17}, + {"CanonicalHeaderKey", Func, 0}, + {"Client", Type, 0}, + {"Client.CheckRedirect", Field, 0}, + {"Client.Jar", Field, 0}, + {"Client.Timeout", Field, 3}, + {"Client.Transport", Field, 0}, + {"CloseNotifier", Type, 1}, + {"ConnState", Type, 3}, + {"Cookie", Type, 0}, + {"Cookie.Domain", Field, 0}, + {"Cookie.Expires", Field, 0}, + {"Cookie.HttpOnly", Field, 0}, + {"Cookie.MaxAge", Field, 0}, + {"Cookie.Name", Field, 0}, + {"Cookie.Path", Field, 0}, + {"Cookie.Raw", Field, 0}, + {"Cookie.RawExpires", Field, 0}, + {"Cookie.SameSite", Field, 11}, + {"Cookie.Secure", Field, 0}, + {"Cookie.Unparsed", Field, 0}, + {"Cookie.Value", Field, 0}, + {"CookieJar", Type, 0}, + {"DefaultClient", Var, 0}, + {"DefaultMaxHeaderBytes", Const, 0}, + {"DefaultMaxIdleConnsPerHost", Const, 0}, + {"DefaultServeMux", Var, 0}, + {"DefaultTransport", Var, 0}, + {"DetectContentType", Func, 0}, + {"Dir", Type, 0}, + {"ErrAbortHandler", Var, 8}, + {"ErrBodyNotAllowed", Var, 0}, + {"ErrBodyReadAfterClose", Var, 0}, + {"ErrContentLength", Var, 0}, + {"ErrHandlerTimeout", Var, 0}, + {"ErrHeaderTooLong", Var, 0}, + {"ErrHijacked", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrMissingBoundary", Var, 0}, + {"ErrMissingContentLength", Var, 0}, + {"ErrMissingFile", Var, 0}, + {"ErrNoCookie", Var, 0}, + {"ErrNoLocation", Var, 0}, + {"ErrNotMultipart", Var, 0}, + {"ErrNotSupported", Var, 0}, + {"ErrSchemeMismatch", Var, 21}, + {"ErrServerClosed", Var, 8}, + {"ErrShortBody", Var, 0}, + {"ErrSkipAltProtocol", Var, 6}, + {"ErrUnexpectedTrailer", Var, 0}, + {"ErrUseLastResponse", Var, 7}, + {"ErrWriteAfterFlush", Var, 0}, + {"Error", Func, 0}, + {"FS", Func, 16}, + {"File", Type, 0}, + {"FileServer", Func, 0}, + {"FileServerFS", Func, 22}, + {"FileSystem", Type, 0}, + {"Flusher", Type, 0}, + {"Get", Func, 0}, + {"Handle", Func, 0}, + {"HandleFunc", Func, 0}, + {"Handler", Type, 0}, + {"HandlerFunc", Type, 0}, + {"Head", Func, 0}, + {"Header", Type, 0}, + {"Hijacker", Type, 0}, + {"ListenAndServe", Func, 0}, + {"ListenAndServeTLS", Func, 0}, + {"LocalAddrContextKey", Var, 7}, + {"MaxBytesError", Type, 19}, + {"MaxBytesError.Limit", Field, 19}, + {"MaxBytesHandler", Func, 18}, + {"MaxBytesReader", Func, 0}, + {"MethodConnect", Const, 6}, + {"MethodDelete", Const, 6}, + {"MethodGet", Const, 6}, + {"MethodHead", Const, 6}, + {"MethodOptions", Const, 6}, + {"MethodPatch", Const, 6}, + {"MethodPost", Const, 6}, + {"MethodPut", Const, 6}, + {"MethodTrace", Const, 6}, + {"NewFileTransport", Func, 0}, + {"NewFileTransportFS", Func, 22}, + {"NewRequest", Func, 0}, + {"NewRequestWithContext", Func, 13}, + {"NewResponseController", Func, 20}, + {"NewServeMux", Func, 0}, + {"NoBody", Var, 8}, + {"NotFound", Func, 0}, + {"NotFoundHandler", Func, 0}, + {"ParseHTTPVersion", Func, 0}, + {"ParseTime", Func, 1}, + {"Post", Func, 0}, + {"PostForm", Func, 0}, + {"ProtocolError", Type, 0}, + 
{"ProtocolError.ErrorString", Field, 0}, + {"ProxyFromEnvironment", Func, 0}, + {"ProxyURL", Func, 0}, + {"PushOptions", Type, 8}, + {"PushOptions.Header", Field, 8}, + {"PushOptions.Method", Field, 8}, + {"Pusher", Type, 8}, + {"ReadRequest", Func, 0}, + {"ReadResponse", Func, 0}, + {"Redirect", Func, 0}, + {"RedirectHandler", Func, 0}, + {"Request", Type, 0}, + {"Request.Body", Field, 0}, + {"Request.Cancel", Field, 5}, + {"Request.Close", Field, 0}, + {"Request.ContentLength", Field, 0}, + {"Request.Form", Field, 0}, + {"Request.GetBody", Field, 8}, + {"Request.Header", Field, 0}, + {"Request.Host", Field, 0}, + {"Request.Method", Field, 0}, + {"Request.MultipartForm", Field, 0}, + {"Request.PostForm", Field, 1}, + {"Request.Proto", Field, 0}, + {"Request.ProtoMajor", Field, 0}, + {"Request.ProtoMinor", Field, 0}, + {"Request.RemoteAddr", Field, 0}, + {"Request.RequestURI", Field, 0}, + {"Request.Response", Field, 7}, + {"Request.TLS", Field, 0}, + {"Request.Trailer", Field, 0}, + {"Request.TransferEncoding", Field, 0}, + {"Request.URL", Field, 0}, + {"Response", Type, 0}, + {"Response.Body", Field, 0}, + {"Response.Close", Field, 0}, + {"Response.ContentLength", Field, 0}, + {"Response.Header", Field, 0}, + {"Response.Proto", Field, 0}, + {"Response.ProtoMajor", Field, 0}, + {"Response.ProtoMinor", Field, 0}, + {"Response.Request", Field, 0}, + {"Response.Status", Field, 0}, + {"Response.StatusCode", Field, 0}, + {"Response.TLS", Field, 3}, + {"Response.Trailer", Field, 0}, + {"Response.TransferEncoding", Field, 0}, + {"Response.Uncompressed", Field, 7}, + {"ResponseController", Type, 20}, + {"ResponseWriter", Type, 0}, + {"RoundTripper", Type, 0}, + {"SameSite", Type, 11}, + {"SameSiteDefaultMode", Const, 11}, + {"SameSiteLaxMode", Const, 11}, + {"SameSiteNoneMode", Const, 13}, + {"SameSiteStrictMode", Const, 11}, + {"Serve", Func, 0}, + {"ServeContent", Func, 0}, + {"ServeFile", Func, 0}, + {"ServeFileFS", Func, 22}, + {"ServeMux", Type, 0}, + {"ServeTLS", Func, 9}, + {"Server", Type, 0}, + {"Server.Addr", Field, 0}, + {"Server.BaseContext", Field, 13}, + {"Server.ConnContext", Field, 13}, + {"Server.ConnState", Field, 3}, + {"Server.DisableGeneralOptionsHandler", Field, 20}, + {"Server.ErrorLog", Field, 3}, + {"Server.Handler", Field, 0}, + {"Server.IdleTimeout", Field, 8}, + {"Server.MaxHeaderBytes", Field, 0}, + {"Server.ReadHeaderTimeout", Field, 8}, + {"Server.ReadTimeout", Field, 0}, + {"Server.TLSConfig", Field, 0}, + {"Server.TLSNextProto", Field, 1}, + {"Server.WriteTimeout", Field, 0}, + {"ServerContextKey", Var, 7}, + {"SetCookie", Func, 0}, + {"StateActive", Const, 3}, + {"StateClosed", Const, 3}, + {"StateHijacked", Const, 3}, + {"StateIdle", Const, 3}, + {"StateNew", Const, 3}, + {"StatusAccepted", Const, 0}, + {"StatusAlreadyReported", Const, 7}, + {"StatusBadGateway", Const, 0}, + {"StatusBadRequest", Const, 0}, + {"StatusConflict", Const, 0}, + {"StatusContinue", Const, 0}, + {"StatusCreated", Const, 0}, + {"StatusEarlyHints", Const, 13}, + {"StatusExpectationFailed", Const, 0}, + {"StatusFailedDependency", Const, 7}, + {"StatusForbidden", Const, 0}, + {"StatusFound", Const, 0}, + {"StatusGatewayTimeout", Const, 0}, + {"StatusGone", Const, 0}, + {"StatusHTTPVersionNotSupported", Const, 0}, + {"StatusIMUsed", Const, 7}, + {"StatusInsufficientStorage", Const, 7}, + {"StatusInternalServerError", Const, 0}, + {"StatusLengthRequired", Const, 0}, + {"StatusLocked", Const, 7}, + {"StatusLoopDetected", Const, 7}, + {"StatusMethodNotAllowed", Const, 0}, + 
{"StatusMisdirectedRequest", Const, 11}, + {"StatusMovedPermanently", Const, 0}, + {"StatusMultiStatus", Const, 7}, + {"StatusMultipleChoices", Const, 0}, + {"StatusNetworkAuthenticationRequired", Const, 6}, + {"StatusNoContent", Const, 0}, + {"StatusNonAuthoritativeInfo", Const, 0}, + {"StatusNotAcceptable", Const, 0}, + {"StatusNotExtended", Const, 7}, + {"StatusNotFound", Const, 0}, + {"StatusNotImplemented", Const, 0}, + {"StatusNotModified", Const, 0}, + {"StatusOK", Const, 0}, + {"StatusPartialContent", Const, 0}, + {"StatusPaymentRequired", Const, 0}, + {"StatusPermanentRedirect", Const, 7}, + {"StatusPreconditionFailed", Const, 0}, + {"StatusPreconditionRequired", Const, 6}, + {"StatusProcessing", Const, 7}, + {"StatusProxyAuthRequired", Const, 0}, + {"StatusRequestEntityTooLarge", Const, 0}, + {"StatusRequestHeaderFieldsTooLarge", Const, 6}, + {"StatusRequestTimeout", Const, 0}, + {"StatusRequestURITooLong", Const, 0}, + {"StatusRequestedRangeNotSatisfiable", Const, 0}, + {"StatusResetContent", Const, 0}, + {"StatusSeeOther", Const, 0}, + {"StatusServiceUnavailable", Const, 0}, + {"StatusSwitchingProtocols", Const, 0}, + {"StatusTeapot", Const, 0}, + {"StatusTemporaryRedirect", Const, 0}, + {"StatusText", Func, 0}, + {"StatusTooEarly", Const, 12}, + {"StatusTooManyRequests", Const, 6}, + {"StatusUnauthorized", Const, 0}, + {"StatusUnavailableForLegalReasons", Const, 6}, + {"StatusUnprocessableEntity", Const, 7}, + {"StatusUnsupportedMediaType", Const, 0}, + {"StatusUpgradeRequired", Const, 7}, + {"StatusUseProxy", Const, 0}, + {"StatusVariantAlsoNegotiates", Const, 7}, + {"StripPrefix", Func, 0}, + {"TimeFormat", Const, 0}, + {"TimeoutHandler", Func, 0}, + {"TrailerPrefix", Const, 8}, + {"Transport", Type, 0}, + {"Transport.Dial", Field, 0}, + {"Transport.DialContext", Field, 7}, + {"Transport.DialTLS", Field, 4}, + {"Transport.DialTLSContext", Field, 14}, + {"Transport.DisableCompression", Field, 0}, + {"Transport.DisableKeepAlives", Field, 0}, + {"Transport.ExpectContinueTimeout", Field, 6}, + {"Transport.ForceAttemptHTTP2", Field, 13}, + {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.IdleConnTimeout", Field, 7}, + {"Transport.MaxConnsPerHost", Field, 11}, + {"Transport.MaxIdleConns", Field, 7}, + {"Transport.MaxIdleConnsPerHost", Field, 0}, + {"Transport.MaxResponseHeaderBytes", Field, 7}, + {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Proxy", Field, 0}, + {"Transport.ProxyConnectHeader", Field, 8}, + {"Transport.ReadBufferSize", Field, 13}, + {"Transport.ResponseHeaderTimeout", Field, 1}, + {"Transport.TLSClientConfig", Field, 0}, + {"Transport.TLSHandshakeTimeout", Field, 3}, + {"Transport.TLSNextProto", Field, 6}, + {"Transport.WriteBufferSize", Field, 13}, + }, + "net/http/cgi": { + {"(*Handler).ServeHTTP", Method, 0}, + {"Handler", Type, 0}, + {"Handler.Args", Field, 0}, + {"Handler.Dir", Field, 0}, + {"Handler.Env", Field, 0}, + {"Handler.InheritEnv", Field, 0}, + {"Handler.Logger", Field, 0}, + {"Handler.Path", Field, 0}, + {"Handler.PathLocationHandler", Field, 0}, + {"Handler.Root", Field, 0}, + {"Handler.Stderr", Field, 7}, + {"Request", Func, 0}, + {"RequestFromMap", Func, 0}, + {"Serve", Func, 0}, + }, + "net/http/cookiejar": { + {"(*Jar).Cookies", Method, 1}, + {"(*Jar).SetCookies", Method, 1}, + {"Jar", Type, 1}, + {"New", Func, 1}, + {"Options", Type, 1}, + {"Options.PublicSuffixList", Field, 1}, + {"PublicSuffixList", Type, 1}, + }, + "net/http/fcgi": { + {"ErrConnClosed", Var, 5}, + {"ErrRequestAborted", Var, 5}, + 
{"ProcessEnv", Func, 9}, + {"Serve", Func, 0}, + }, + "net/http/httptest": { + {"(*ResponseRecorder).Flush", Method, 0}, + {"(*ResponseRecorder).Header", Method, 0}, + {"(*ResponseRecorder).Result", Method, 7}, + {"(*ResponseRecorder).Write", Method, 0}, + {"(*ResponseRecorder).WriteHeader", Method, 0}, + {"(*ResponseRecorder).WriteString", Method, 6}, + {"(*Server).Certificate", Method, 9}, + {"(*Server).Client", Method, 9}, + {"(*Server).Close", Method, 0}, + {"(*Server).CloseClientConnections", Method, 0}, + {"(*Server).Start", Method, 0}, + {"(*Server).StartTLS", Method, 0}, + {"DefaultRemoteAddr", Const, 0}, + {"NewRecorder", Func, 0}, + {"NewRequest", Func, 7}, + {"NewServer", Func, 0}, + {"NewTLSServer", Func, 0}, + {"NewUnstartedServer", Func, 0}, + {"ResponseRecorder", Type, 0}, + {"ResponseRecorder.Body", Field, 0}, + {"ResponseRecorder.Code", Field, 0}, + {"ResponseRecorder.Flushed", Field, 0}, + {"ResponseRecorder.HeaderMap", Field, 0}, + {"Server", Type, 0}, + {"Server.Config", Field, 0}, + {"Server.EnableHTTP2", Field, 14}, + {"Server.Listener", Field, 0}, + {"Server.TLS", Field, 0}, + {"Server.URL", Field, 0}, + }, + "net/http/httptrace": { + {"ClientTrace", Type, 7}, + {"ClientTrace.ConnectDone", Field, 7}, + {"ClientTrace.ConnectStart", Field, 7}, + {"ClientTrace.DNSDone", Field, 7}, + {"ClientTrace.DNSStart", Field, 7}, + {"ClientTrace.GetConn", Field, 7}, + {"ClientTrace.Got100Continue", Field, 7}, + {"ClientTrace.Got1xxResponse", Field, 11}, + {"ClientTrace.GotConn", Field, 7}, + {"ClientTrace.GotFirstResponseByte", Field, 7}, + {"ClientTrace.PutIdleConn", Field, 7}, + {"ClientTrace.TLSHandshakeDone", Field, 8}, + {"ClientTrace.TLSHandshakeStart", Field, 8}, + {"ClientTrace.Wait100Continue", Field, 7}, + {"ClientTrace.WroteHeaderField", Field, 11}, + {"ClientTrace.WroteHeaders", Field, 7}, + {"ClientTrace.WroteRequest", Field, 7}, + {"ContextClientTrace", Func, 7}, + {"DNSDoneInfo", Type, 7}, + {"DNSDoneInfo.Addrs", Field, 7}, + {"DNSDoneInfo.Coalesced", Field, 7}, + {"DNSDoneInfo.Err", Field, 7}, + {"DNSStartInfo", Type, 7}, + {"DNSStartInfo.Host", Field, 7}, + {"GotConnInfo", Type, 7}, + {"GotConnInfo.Conn", Field, 7}, + {"GotConnInfo.IdleTime", Field, 7}, + {"GotConnInfo.Reused", Field, 7}, + {"GotConnInfo.WasIdle", Field, 7}, + {"WithClientTrace", Func, 7}, + {"WroteRequestInfo", Type, 7}, + {"WroteRequestInfo.Err", Field, 7}, + }, + "net/http/httputil": { + {"(*ClientConn).Close", Method, 0}, + {"(*ClientConn).Do", Method, 0}, + {"(*ClientConn).Hijack", Method, 0}, + {"(*ClientConn).Pending", Method, 0}, + {"(*ClientConn).Read", Method, 0}, + {"(*ClientConn).Write", Method, 0}, + {"(*ProxyRequest).SetURL", Method, 20}, + {"(*ProxyRequest).SetXForwarded", Method, 20}, + {"(*ReverseProxy).ServeHTTP", Method, 0}, + {"(*ServerConn).Close", Method, 0}, + {"(*ServerConn).Hijack", Method, 0}, + {"(*ServerConn).Pending", Method, 0}, + {"(*ServerConn).Read", Method, 0}, + {"(*ServerConn).Write", Method, 0}, + {"BufferPool", Type, 6}, + {"ClientConn", Type, 0}, + {"DumpRequest", Func, 0}, + {"DumpRequestOut", Func, 0}, + {"DumpResponse", Func, 0}, + {"ErrClosed", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrPersistEOF", Var, 0}, + {"ErrPipeline", Var, 0}, + {"NewChunkedReader", Func, 0}, + {"NewChunkedWriter", Func, 0}, + {"NewClientConn", Func, 0}, + {"NewProxyClientConn", Func, 0}, + {"NewServerConn", Func, 0}, + {"NewSingleHostReverseProxy", Func, 0}, + {"ProxyRequest", Type, 20}, + {"ProxyRequest.In", Field, 20}, + {"ProxyRequest.Out", Field, 20}, + {"ReverseProxy", 
Type, 0}, + {"ReverseProxy.BufferPool", Field, 6}, + {"ReverseProxy.Director", Field, 0}, + {"ReverseProxy.ErrorHandler", Field, 11}, + {"ReverseProxy.ErrorLog", Field, 4}, + {"ReverseProxy.FlushInterval", Field, 0}, + {"ReverseProxy.ModifyResponse", Field, 8}, + {"ReverseProxy.Rewrite", Field, 20}, + {"ReverseProxy.Transport", Field, 0}, + {"ServerConn", Type, 0}, + }, + "net/http/pprof": { + {"Cmdline", Func, 0}, + {"Handler", Func, 0}, + {"Index", Func, 0}, + {"Profile", Func, 0}, + {"Symbol", Func, 0}, + {"Trace", Func, 5}, + }, + "net/mail": { + {"(*Address).String", Method, 0}, + {"(*AddressParser).Parse", Method, 5}, + {"(*AddressParser).ParseList", Method, 5}, + {"(Header).AddressList", Method, 0}, + {"(Header).Date", Method, 0}, + {"(Header).Get", Method, 0}, + {"Address", Type, 0}, + {"Address.Address", Field, 0}, + {"Address.Name", Field, 0}, + {"AddressParser", Type, 5}, + {"AddressParser.WordDecoder", Field, 5}, + {"ErrHeaderNotPresent", Var, 0}, + {"Header", Type, 0}, + {"Message", Type, 0}, + {"Message.Body", Field, 0}, + {"Message.Header", Field, 0}, + {"ParseAddress", Func, 1}, + {"ParseAddressList", Func, 1}, + {"ParseDate", Func, 8}, + {"ReadMessage", Func, 0}, + }, + "net/netip": { + {"(*Addr).UnmarshalBinary", Method, 18}, + {"(*Addr).UnmarshalText", Method, 18}, + {"(*AddrPort).UnmarshalBinary", Method, 18}, + {"(*AddrPort).UnmarshalText", Method, 18}, + {"(*Prefix).UnmarshalBinary", Method, 18}, + {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendTo", Method, 18}, + {"(Addr).As16", Method, 18}, + {"(Addr).As4", Method, 18}, + {"(Addr).AsSlice", Method, 18}, + {"(Addr).BitLen", Method, 18}, + {"(Addr).Compare", Method, 18}, + {"(Addr).Is4", Method, 18}, + {"(Addr).Is4In6", Method, 18}, + {"(Addr).Is6", Method, 18}, + {"(Addr).IsGlobalUnicast", Method, 18}, + {"(Addr).IsInterfaceLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalUnicast", Method, 18}, + {"(Addr).IsLoopback", Method, 18}, + {"(Addr).IsMulticast", Method, 18}, + {"(Addr).IsPrivate", Method, 18}, + {"(Addr).IsUnspecified", Method, 18}, + {"(Addr).IsValid", Method, 18}, + {"(Addr).Less", Method, 18}, + {"(Addr).MarshalBinary", Method, 18}, + {"(Addr).MarshalText", Method, 18}, + {"(Addr).Next", Method, 18}, + {"(Addr).Prefix", Method, 18}, + {"(Addr).Prev", Method, 18}, + {"(Addr).String", Method, 18}, + {"(Addr).StringExpanded", Method, 18}, + {"(Addr).Unmap", Method, 18}, + {"(Addr).WithZone", Method, 18}, + {"(Addr).Zone", Method, 18}, + {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendTo", Method, 18}, + {"(AddrPort).Compare", Method, 22}, + {"(AddrPort).IsValid", Method, 18}, + {"(AddrPort).MarshalBinary", Method, 18}, + {"(AddrPort).MarshalText", Method, 18}, + {"(AddrPort).Port", Method, 18}, + {"(AddrPort).String", Method, 18}, + {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendTo", Method, 18}, + {"(Prefix).Bits", Method, 18}, + {"(Prefix).Contains", Method, 18}, + {"(Prefix).IsSingleIP", Method, 18}, + {"(Prefix).IsValid", Method, 18}, + {"(Prefix).MarshalBinary", Method, 18}, + {"(Prefix).MarshalText", Method, 18}, + {"(Prefix).Masked", Method, 18}, + {"(Prefix).Overlaps", Method, 18}, + {"(Prefix).String", Method, 18}, + {"Addr", Type, 18}, + {"AddrFrom16", Func, 18}, + {"AddrFrom4", Func, 18}, + {"AddrFromSlice", Func, 18}, + {"AddrPort", Type, 18}, + {"AddrPortFrom", Func, 18}, + {"IPv4Unspecified", Func, 18}, + {"IPv6LinkLocalAllNodes", Func, 18}, + {"IPv6LinkLocalAllRouters", Func, 20}, + {"IPv6Loopback", Func, 20}, + 
{"IPv6Unspecified", Func, 18}, + {"MustParseAddr", Func, 18}, + {"MustParseAddrPort", Func, 18}, + {"MustParsePrefix", Func, 18}, + {"ParseAddr", Func, 18}, + {"ParseAddrPort", Func, 18}, + {"ParsePrefix", Func, 18}, + {"Prefix", Type, 18}, + {"PrefixFrom", Func, 18}, + }, + "net/rpc": { + {"(*Client).Call", Method, 0}, + {"(*Client).Close", Method, 0}, + {"(*Client).Go", Method, 0}, + {"(*Server).Accept", Method, 0}, + {"(*Server).HandleHTTP", Method, 0}, + {"(*Server).Register", Method, 0}, + {"(*Server).RegisterName", Method, 0}, + {"(*Server).ServeCodec", Method, 0}, + {"(*Server).ServeConn", Method, 0}, + {"(*Server).ServeHTTP", Method, 0}, + {"(*Server).ServeRequest", Method, 0}, + {"(ServerError).Error", Method, 0}, + {"Accept", Func, 0}, + {"Call", Type, 0}, + {"Call.Args", Field, 0}, + {"Call.Done", Field, 0}, + {"Call.Error", Field, 0}, + {"Call.Reply", Field, 0}, + {"Call.ServiceMethod", Field, 0}, + {"Client", Type, 0}, + {"ClientCodec", Type, 0}, + {"DefaultDebugPath", Const, 0}, + {"DefaultRPCPath", Const, 0}, + {"DefaultServer", Var, 0}, + {"Dial", Func, 0}, + {"DialHTTP", Func, 0}, + {"DialHTTPPath", Func, 0}, + {"ErrShutdown", Var, 0}, + {"HandleHTTP", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientWithCodec", Func, 0}, + {"NewServer", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + {"Request", Type, 0}, + {"Request.Seq", Field, 0}, + {"Request.ServiceMethod", Field, 0}, + {"Response", Type, 0}, + {"Response.Error", Field, 0}, + {"Response.Seq", Field, 0}, + {"Response.ServiceMethod", Field, 0}, + {"ServeCodec", Func, 0}, + {"ServeConn", Func, 0}, + {"ServeRequest", Func, 0}, + {"Server", Type, 0}, + {"ServerCodec", Type, 0}, + {"ServerError", Type, 0}, + }, + "net/rpc/jsonrpc": { + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientCodec", Func, 0}, + {"NewServerCodec", Func, 0}, + {"ServeConn", Func, 0}, + }, + "net/smtp": { + {"(*Client).Auth", Method, 0}, + {"(*Client).Close", Method, 2}, + {"(*Client).Data", Method, 0}, + {"(*Client).Extension", Method, 0}, + {"(*Client).Hello", Method, 1}, + {"(*Client).Mail", Method, 0}, + {"(*Client).Noop", Method, 10}, + {"(*Client).Quit", Method, 0}, + {"(*Client).Rcpt", Method, 0}, + {"(*Client).Reset", Method, 0}, + {"(*Client).StartTLS", Method, 0}, + {"(*Client).TLSConnectionState", Method, 5}, + {"(*Client).Verify", Method, 0}, + {"Auth", Type, 0}, + {"CRAMMD5Auth", Func, 0}, + {"Client", Type, 0}, + {"Client.Text", Field, 0}, + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"PlainAuth", Func, 0}, + {"SendMail", Func, 0}, + {"ServerInfo", Type, 0}, + {"ServerInfo.Auth", Field, 0}, + {"ServerInfo.Name", Field, 0}, + {"ServerInfo.TLS", Field, 0}, + }, + "net/textproto": { + {"(*Conn).Close", Method, 0}, + {"(*Conn).Cmd", Method, 0}, + {"(*Conn).DotReader", Method, 0}, + {"(*Conn).DotWriter", Method, 0}, + {"(*Conn).EndRequest", Method, 0}, + {"(*Conn).EndResponse", Method, 0}, + {"(*Conn).Next", Method, 0}, + {"(*Conn).PrintfLine", Method, 0}, + {"(*Conn).ReadCodeLine", Method, 0}, + {"(*Conn).ReadContinuedLine", Method, 0}, + {"(*Conn).ReadContinuedLineBytes", Method, 0}, + {"(*Conn).ReadDotBytes", Method, 0}, + {"(*Conn).ReadDotLines", Method, 0}, + {"(*Conn).ReadLine", Method, 0}, + {"(*Conn).ReadLineBytes", Method, 0}, + {"(*Conn).ReadMIMEHeader", Method, 0}, + {"(*Conn).ReadResponse", Method, 0}, + {"(*Conn).StartRequest", Method, 0}, + {"(*Conn).StartResponse", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Pipeline).EndRequest", Method, 0}, + {"(*Pipeline).EndResponse", Method, 0}, + 
{"(*Pipeline).Next", Method, 0}, + {"(*Pipeline).StartRequest", Method, 0}, + {"(*Pipeline).StartResponse", Method, 0}, + {"(*Reader).DotReader", Method, 0}, + {"(*Reader).ReadCodeLine", Method, 0}, + {"(*Reader).ReadContinuedLine", Method, 0}, + {"(*Reader).ReadContinuedLineBytes", Method, 0}, + {"(*Reader).ReadDotBytes", Method, 0}, + {"(*Reader).ReadDotLines", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadLineBytes", Method, 0}, + {"(*Reader).ReadMIMEHeader", Method, 0}, + {"(*Reader).ReadResponse", Method, 0}, + {"(*Writer).DotWriter", Method, 0}, + {"(*Writer).PrintfLine", Method, 0}, + {"(MIMEHeader).Add", Method, 0}, + {"(MIMEHeader).Del", Method, 0}, + {"(MIMEHeader).Get", Method, 0}, + {"(MIMEHeader).Set", Method, 0}, + {"(MIMEHeader).Values", Method, 14}, + {"(ProtocolError).Error", Method, 0}, + {"CanonicalMIMEHeaderKey", Func, 0}, + {"Conn", Type, 0}, + {"Conn.Pipeline", Field, 0}, + {"Conn.Reader", Field, 0}, + {"Conn.Writer", Field, 0}, + {"Dial", Func, 0}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Msg", Field, 0}, + {"MIMEHeader", Type, 0}, + {"NewConn", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Pipeline", Type, 0}, + {"ProtocolError", Type, 0}, + {"Reader", Type, 0}, + {"Reader.R", Field, 0}, + {"TrimBytes", Func, 1}, + {"TrimString", Func, 1}, + {"Writer", Type, 0}, + {"Writer.W", Field, 0}, + }, + "net/url": { + {"(*Error).Error", Method, 0}, + {"(*Error).Temporary", Method, 6}, + {"(*Error).Timeout", Method, 6}, + {"(*Error).Unwrap", Method, 13}, + {"(*URL).EscapedFragment", Method, 15}, + {"(*URL).EscapedPath", Method, 5}, + {"(*URL).Hostname", Method, 8}, + {"(*URL).IsAbs", Method, 0}, + {"(*URL).JoinPath", Method, 19}, + {"(*URL).MarshalBinary", Method, 8}, + {"(*URL).Parse", Method, 0}, + {"(*URL).Port", Method, 8}, + {"(*URL).Query", Method, 0}, + {"(*URL).Redacted", Method, 15}, + {"(*URL).RequestURI", Method, 0}, + {"(*URL).ResolveReference", Method, 0}, + {"(*URL).String", Method, 0}, + {"(*URL).UnmarshalBinary", Method, 8}, + {"(*Userinfo).Password", Method, 0}, + {"(*Userinfo).String", Method, 0}, + {"(*Userinfo).Username", Method, 0}, + {"(EscapeError).Error", Method, 0}, + {"(InvalidHostError).Error", Method, 6}, + {"(Values).Add", Method, 0}, + {"(Values).Del", Method, 0}, + {"(Values).Encode", Method, 0}, + {"(Values).Get", Method, 0}, + {"(Values).Has", Method, 17}, + {"(Values).Set", Method, 0}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Op", Field, 0}, + {"Error.URL", Field, 0}, + {"EscapeError", Type, 0}, + {"InvalidHostError", Type, 6}, + {"JoinPath", Func, 19}, + {"Parse", Func, 0}, + {"ParseQuery", Func, 0}, + {"ParseRequestURI", Func, 0}, + {"PathEscape", Func, 8}, + {"PathUnescape", Func, 8}, + {"QueryEscape", Func, 0}, + {"QueryUnescape", Func, 0}, + {"URL", Type, 0}, + {"URL.ForceQuery", Field, 7}, + {"URL.Fragment", Field, 0}, + {"URL.Host", Field, 0}, + {"URL.OmitHost", Field, 19}, + {"URL.Opaque", Field, 0}, + {"URL.Path", Field, 0}, + {"URL.RawFragment", Field, 15}, + {"URL.RawPath", Field, 5}, + {"URL.RawQuery", Field, 0}, + {"URL.Scheme", Field, 0}, + {"URL.User", Field, 0}, + {"User", Func, 0}, + {"UserPassword", Func, 0}, + {"Userinfo", Type, 0}, + {"Values", Type, 0}, + }, + "os": { + {"(*File).Chdir", Method, 0}, + {"(*File).Chmod", Method, 0}, + {"(*File).Chown", Method, 0}, + {"(*File).Close", Method, 0}, + {"(*File).Fd", Method, 0}, + {"(*File).Name", Method, 0}, + {"(*File).Read", Method, 0}, + {"(*File).ReadAt", Method, 0}, + {"(*File).ReadDir", Method, 
16}, + {"(*File).ReadFrom", Method, 15}, + {"(*File).Readdir", Method, 0}, + {"(*File).Readdirnames", Method, 0}, + {"(*File).Seek", Method, 0}, + {"(*File).SetDeadline", Method, 10}, + {"(*File).SetReadDeadline", Method, 10}, + {"(*File).SetWriteDeadline", Method, 10}, + {"(*File).Stat", Method, 0}, + {"(*File).Sync", Method, 0}, + {"(*File).SyscallConn", Method, 12}, + {"(*File).Truncate", Method, 0}, + {"(*File).Write", Method, 0}, + {"(*File).WriteAt", Method, 0}, + {"(*File).WriteString", Method, 0}, + {"(*File).WriteTo", Method, 22}, + {"(*LinkError).Error", Method, 0}, + {"(*LinkError).Unwrap", Method, 13}, + {"(*PathError).Error", Method, 0}, + {"(*PathError).Timeout", Method, 10}, + {"(*PathError).Unwrap", Method, 13}, + {"(*Process).Kill", Method, 0}, + {"(*Process).Release", Method, 0}, + {"(*Process).Signal", Method, 0}, + {"(*Process).Wait", Method, 0}, + {"(*ProcessState).ExitCode", Method, 12}, + {"(*ProcessState).Exited", Method, 0}, + {"(*ProcessState).Pid", Method, 0}, + {"(*ProcessState).String", Method, 0}, + {"(*ProcessState).Success", Method, 0}, + {"(*ProcessState).Sys", Method, 0}, + {"(*ProcessState).SysUsage", Method, 0}, + {"(*ProcessState).SystemTime", Method, 0}, + {"(*ProcessState).UserTime", Method, 0}, + {"(*SyscallError).Error", Method, 0}, + {"(*SyscallError).Timeout", Method, 10}, + {"(*SyscallError).Unwrap", Method, 13}, + {"(FileMode).IsDir", Method, 0}, + {"(FileMode).IsRegular", Method, 1}, + {"(FileMode).Perm", Method, 0}, + {"(FileMode).String", Method, 0}, + {"Args", Var, 0}, + {"Chdir", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chtimes", Func, 0}, + {"Clearenv", Func, 0}, + {"Create", Func, 0}, + {"CreateTemp", Func, 16}, + {"DevNull", Const, 0}, + {"DirEntry", Type, 16}, + {"DirFS", Func, 16}, + {"Environ", Func, 0}, + {"ErrClosed", Var, 8}, + {"ErrDeadlineExceeded", Var, 15}, + {"ErrExist", Var, 0}, + {"ErrInvalid", Var, 0}, + {"ErrNoDeadline", Var, 10}, + {"ErrNotExist", Var, 0}, + {"ErrPermission", Var, 0}, + {"ErrProcessDone", Var, 16}, + {"Executable", Func, 8}, + {"Exit", Func, 0}, + {"Expand", Func, 0}, + {"ExpandEnv", Func, 0}, + {"File", Type, 0}, + {"FileInfo", Type, 0}, + {"FileMode", Type, 0}, + {"FindProcess", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Hostname", Func, 0}, + {"Interrupt", Var, 0}, + {"IsExist", Func, 0}, + {"IsNotExist", Func, 0}, + {"IsPathSeparator", Func, 0}, + {"IsPermission", Func, 0}, + {"IsTimeout", Func, 10}, + {"Kill", Var, 0}, + {"Lchown", Func, 0}, + {"Link", Func, 0}, + {"LinkError", Type, 0}, + {"LinkError.Err", Field, 0}, + {"LinkError.New", Field, 0}, + {"LinkError.Old", Field, 0}, + {"LinkError.Op", Field, 0}, + {"LookupEnv", Func, 5}, + {"Lstat", Func, 0}, + {"Mkdir", Func, 0}, + {"MkdirAll", Func, 0}, + {"MkdirTemp", Func, 16}, + {"ModeAppend", Const, 0}, + {"ModeCharDevice", Const, 0}, + {"ModeDevice", Const, 0}, + {"ModeDir", Const, 0}, + {"ModeExclusive", Const, 0}, + {"ModeIrregular", Const, 11}, + {"ModeNamedPipe", Const, 0}, + {"ModePerm", Const, 0}, + {"ModeSetgid", Const, 0}, + {"ModeSetuid", Const, 0}, + {"ModeSocket", Const, 0}, + {"ModeSticky", Const, 0}, + {"ModeSymlink", Const, 0}, + {"ModeTemporary", Const, 0}, + {"ModeType", Const, 0}, + {"NewFile", Func, 0}, + {"NewSyscallError", Func, 0}, + {"O_APPEND", Const, 0}, + {"O_CREATE", Const, 0}, + {"O_EXCL", Const, 
0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenFile", Func, 0}, + {"PathError", Type, 0}, + {"PathError.Err", Field, 0}, + {"PathError.Op", Field, 0}, + {"PathError.Path", Field, 0}, + {"PathListSeparator", Const, 0}, + {"PathSeparator", Const, 0}, + {"Pipe", Func, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process", Type, 0}, + {"Process.Pid", Field, 0}, + {"ProcessState", Type, 0}, + {"ReadDir", Func, 16}, + {"ReadFile", Func, 16}, + {"Readlink", Func, 0}, + {"Remove", Func, 0}, + {"RemoveAll", Func, 0}, + {"Rename", Func, 0}, + {"SEEK_CUR", Const, 0}, + {"SEEK_END", Const, 0}, + {"SEEK_SET", Const, 0}, + {"SameFile", Func, 0}, + {"Setenv", Func, 0}, + {"Signal", Type, 0}, + {"StartProcess", Func, 0}, + {"Stat", Func, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"Symlink", Func, 0}, + {"SyscallError", Type, 0}, + {"SyscallError.Err", Field, 0}, + {"SyscallError.Syscall", Field, 0}, + {"TempDir", Func, 0}, + {"Truncate", Func, 0}, + {"Unsetenv", Func, 4}, + {"UserCacheDir", Func, 11}, + {"UserConfigDir", Func, 13}, + {"UserHomeDir", Func, 12}, + {"WriteFile", Func, 16}, + }, + "os/exec": { + {"(*Cmd).CombinedOutput", Method, 0}, + {"(*Cmd).Environ", Method, 19}, + {"(*Cmd).Output", Method, 0}, + {"(*Cmd).Run", Method, 0}, + {"(*Cmd).Start", Method, 0}, + {"(*Cmd).StderrPipe", Method, 0}, + {"(*Cmd).StdinPipe", Method, 0}, + {"(*Cmd).StdoutPipe", Method, 0}, + {"(*Cmd).String", Method, 13}, + {"(*Cmd).Wait", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Error).Unwrap", Method, 13}, + {"(*ExitError).Error", Method, 0}, + {"(ExitError).ExitCode", Method, 12}, + {"(ExitError).Exited", Method, 0}, + {"(ExitError).Pid", Method, 0}, + {"(ExitError).String", Method, 0}, + {"(ExitError).Success", Method, 0}, + {"(ExitError).Sys", Method, 0}, + {"(ExitError).SysUsage", Method, 0}, + {"(ExitError).SystemTime", Method, 0}, + {"(ExitError).UserTime", Method, 0}, + {"Cmd", Type, 0}, + {"Cmd.Args", Field, 0}, + {"Cmd.Cancel", Field, 20}, + {"Cmd.Dir", Field, 0}, + {"Cmd.Env", Field, 0}, + {"Cmd.Err", Field, 19}, + {"Cmd.ExtraFiles", Field, 0}, + {"Cmd.Path", Field, 0}, + {"Cmd.Process", Field, 0}, + {"Cmd.ProcessState", Field, 0}, + {"Cmd.Stderr", Field, 0}, + {"Cmd.Stdin", Field, 0}, + {"Cmd.Stdout", Field, 0}, + {"Cmd.SysProcAttr", Field, 0}, + {"Cmd.WaitDelay", Field, 20}, + {"Command", Func, 0}, + {"CommandContext", Func, 7}, + {"ErrDot", Var, 19}, + {"ErrNotFound", Var, 0}, + {"ErrWaitDelay", Var, 20}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Name", Field, 0}, + {"ExitError", Type, 0}, + {"ExitError.ProcessState", Field, 0}, + {"ExitError.Stderr", Field, 6}, + {"LookPath", Func, 0}, + }, + "os/signal": { + {"Ignore", Func, 5}, + {"Ignored", Func, 11}, + {"Notify", Func, 0}, + {"NotifyContext", Func, 16}, + {"Reset", Func, 5}, + {"Stop", Func, 1}, + }, + "os/user": { + {"(*User).GroupIds", Method, 7}, + {"(UnknownGroupError).Error", Method, 7}, + {"(UnknownGroupIdError).Error", Method, 7}, + {"(UnknownUserError).Error", Method, 0}, + {"(UnknownUserIdError).Error", Method, 0}, + {"Current", Func, 0}, + {"Group", Type, 7}, + {"Group.Gid", Field, 7}, + {"Group.Name", Field, 7}, + {"Lookup", Func, 0}, + {"LookupGroup", Func, 7}, + {"LookupGroupId", Func, 7}, + {"LookupId", Func, 0}, + {"UnknownGroupError", Type, 7}, + 
{"UnknownGroupIdError", Type, 7}, + {"UnknownUserError", Type, 0}, + {"UnknownUserIdError", Type, 0}, + {"User", Type, 0}, + {"User.Gid", Field, 0}, + {"User.HomeDir", Field, 0}, + {"User.Name", Field, 0}, + {"User.Uid", Field, 0}, + {"User.Username", Field, 0}, + }, + "path": { + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"Ext", Func, 0}, + {"IsAbs", Func, 0}, + {"Join", Func, 0}, + {"Match", Func, 0}, + {"Split", Func, 0}, + }, + "path/filepath": { + {"Abs", Func, 0}, + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"EvalSymlinks", Func, 0}, + {"Ext", Func, 0}, + {"FromSlash", Func, 0}, + {"Glob", Func, 0}, + {"HasPrefix", Func, 0}, + {"IsAbs", Func, 0}, + {"IsLocal", Func, 20}, + {"Join", Func, 0}, + {"ListSeparator", Const, 0}, + {"Match", Func, 0}, + {"Rel", Func, 0}, + {"Separator", Const, 0}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 0}, + {"Split", Func, 0}, + {"SplitList", Func, 0}, + {"ToSlash", Func, 0}, + {"VolumeName", Func, 0}, + {"Walk", Func, 0}, + {"WalkDir", Func, 16}, + {"WalkFunc", Type, 0}, + }, + "plugin": { + {"(*Plugin).Lookup", Method, 8}, + {"Open", Func, 8}, + {"Plugin", Type, 8}, + {"Symbol", Type, 8}, + }, + "reflect": { + {"(*MapIter).Key", Method, 12}, + {"(*MapIter).Next", Method, 12}, + {"(*MapIter).Reset", Method, 18}, + {"(*MapIter).Value", Method, 12}, + {"(*ValueError).Error", Method, 0}, + {"(ChanDir).String", Method, 0}, + {"(Kind).String", Method, 0}, + {"(Method).IsExported", Method, 17}, + {"(StructField).IsExported", Method, 17}, + {"(StructTag).Get", Method, 0}, + {"(StructTag).Lookup", Method, 7}, + {"(Value).Addr", Method, 0}, + {"(Value).Bool", Method, 0}, + {"(Value).Bytes", Method, 0}, + {"(Value).Call", Method, 0}, + {"(Value).CallSlice", Method, 0}, + {"(Value).CanAddr", Method, 0}, + {"(Value).CanComplex", Method, 18}, + {"(Value).CanConvert", Method, 17}, + {"(Value).CanFloat", Method, 18}, + {"(Value).CanInt", Method, 18}, + {"(Value).CanInterface", Method, 0}, + {"(Value).CanSet", Method, 0}, + {"(Value).CanUint", Method, 18}, + {"(Value).Cap", Method, 0}, + {"(Value).Clear", Method, 21}, + {"(Value).Close", Method, 0}, + {"(Value).Comparable", Method, 20}, + {"(Value).Complex", Method, 0}, + {"(Value).Convert", Method, 1}, + {"(Value).Elem", Method, 0}, + {"(Value).Equal", Method, 20}, + {"(Value).Field", Method, 0}, + {"(Value).FieldByIndex", Method, 0}, + {"(Value).FieldByIndexErr", Method, 18}, + {"(Value).FieldByName", Method, 0}, + {"(Value).FieldByNameFunc", Method, 0}, + {"(Value).Float", Method, 0}, + {"(Value).Grow", Method, 20}, + {"(Value).Index", Method, 0}, + {"(Value).Int", Method, 0}, + {"(Value).Interface", Method, 0}, + {"(Value).InterfaceData", Method, 0}, + {"(Value).IsNil", Method, 0}, + {"(Value).IsValid", Method, 0}, + {"(Value).IsZero", Method, 13}, + {"(Value).Kind", Method, 0}, + {"(Value).Len", Method, 0}, + {"(Value).MapIndex", Method, 0}, + {"(Value).MapKeys", Method, 0}, + {"(Value).MapRange", Method, 12}, + {"(Value).Method", Method, 0}, + {"(Value).MethodByName", Method, 0}, + {"(Value).NumField", Method, 0}, + {"(Value).NumMethod", Method, 0}, + {"(Value).OverflowComplex", Method, 0}, + {"(Value).OverflowFloat", Method, 0}, + {"(Value).OverflowInt", Method, 0}, + {"(Value).OverflowUint", Method, 0}, + {"(Value).Pointer", Method, 0}, + {"(Value).Recv", Method, 0}, + {"(Value).Send", Method, 0}, + {"(Value).Set", Method, 0}, + {"(Value).SetBool", Method, 0}, + {"(Value).SetBytes", Method, 0}, + 
{"(Value).SetCap", Method, 2}, + {"(Value).SetComplex", Method, 0}, + {"(Value).SetFloat", Method, 0}, + {"(Value).SetInt", Method, 0}, + {"(Value).SetIterKey", Method, 18}, + {"(Value).SetIterValue", Method, 18}, + {"(Value).SetLen", Method, 0}, + {"(Value).SetMapIndex", Method, 0}, + {"(Value).SetPointer", Method, 0}, + {"(Value).SetString", Method, 0}, + {"(Value).SetUint", Method, 0}, + {"(Value).SetZero", Method, 20}, + {"(Value).Slice", Method, 0}, + {"(Value).Slice3", Method, 2}, + {"(Value).String", Method, 0}, + {"(Value).TryRecv", Method, 0}, + {"(Value).TrySend", Method, 0}, + {"(Value).Type", Method, 0}, + {"(Value).Uint", Method, 0}, + {"(Value).UnsafeAddr", Method, 0}, + {"(Value).UnsafePointer", Method, 18}, + {"Append", Func, 0}, + {"AppendSlice", Func, 0}, + {"Array", Const, 0}, + {"ArrayOf", Func, 5}, + {"Bool", Const, 0}, + {"BothDir", Const, 0}, + {"Chan", Const, 0}, + {"ChanDir", Type, 0}, + {"ChanOf", Func, 1}, + {"Complex128", Const, 0}, + {"Complex64", Const, 0}, + {"Copy", Func, 0}, + {"DeepEqual", Func, 0}, + {"Float32", Const, 0}, + {"Float64", Const, 0}, + {"Func", Const, 0}, + {"FuncOf", Func, 5}, + {"Indirect", Func, 0}, + {"Int", Const, 0}, + {"Int16", Const, 0}, + {"Int32", Const, 0}, + {"Int64", Const, 0}, + {"Int8", Const, 0}, + {"Interface", Const, 0}, + {"Invalid", Const, 0}, + {"Kind", Type, 0}, + {"MakeChan", Func, 0}, + {"MakeFunc", Func, 1}, + {"MakeMap", Func, 0}, + {"MakeMapWithSize", Func, 9}, + {"MakeSlice", Func, 0}, + {"Map", Const, 0}, + {"MapIter", Type, 12}, + {"MapOf", Func, 1}, + {"Method", Type, 0}, + {"Method.Func", Field, 0}, + {"Method.Index", Field, 0}, + {"Method.Name", Field, 0}, + {"Method.PkgPath", Field, 0}, + {"Method.Type", Field, 0}, + {"New", Func, 0}, + {"NewAt", Func, 0}, + {"Pointer", Const, 18}, + {"PointerTo", Func, 18}, + {"Ptr", Const, 0}, + {"PtrTo", Func, 0}, + {"RecvDir", Const, 0}, + {"Select", Func, 1}, + {"SelectCase", Type, 1}, + {"SelectCase.Chan", Field, 1}, + {"SelectCase.Dir", Field, 1}, + {"SelectCase.Send", Field, 1}, + {"SelectDefault", Const, 1}, + {"SelectDir", Type, 1}, + {"SelectRecv", Const, 1}, + {"SelectSend", Const, 1}, + {"SendDir", Const, 0}, + {"Slice", Const, 0}, + {"SliceHeader", Type, 0}, + {"SliceHeader.Cap", Field, 0}, + {"SliceHeader.Data", Field, 0}, + {"SliceHeader.Len", Field, 0}, + {"SliceOf", Func, 1}, + {"String", Const, 0}, + {"StringHeader", Type, 0}, + {"StringHeader.Data", Field, 0}, + {"StringHeader.Len", Field, 0}, + {"Struct", Const, 0}, + {"StructField", Type, 0}, + {"StructField.Anonymous", Field, 0}, + {"StructField.Index", Field, 0}, + {"StructField.Name", Field, 0}, + {"StructField.Offset", Field, 0}, + {"StructField.PkgPath", Field, 0}, + {"StructField.Tag", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructOf", Func, 7}, + {"StructTag", Type, 0}, + {"Swapper", Func, 8}, + {"Type", Type, 0}, + {"TypeFor", Func, 22}, + {"TypeOf", Func, 0}, + {"Uint", Const, 0}, + {"Uint16", Const, 0}, + {"Uint32", Const, 0}, + {"Uint64", Const, 0}, + {"Uint8", Const, 0}, + {"Uintptr", Const, 0}, + {"UnsafePointer", Const, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueError.Kind", Field, 0}, + {"ValueError.Method", Field, 0}, + {"ValueOf", Func, 0}, + {"VisibleFields", Func, 17}, + {"Zero", Func, 0}, + }, + "regexp": { + {"(*Regexp).Copy", Method, 6}, + {"(*Regexp).Expand", Method, 0}, + {"(*Regexp).ExpandString", Method, 0}, + {"(*Regexp).Find", Method, 0}, + {"(*Regexp).FindAll", Method, 0}, + {"(*Regexp).FindAllIndex", Method, 0}, + {"(*Regexp).FindAllString", 
Method, 0}, + {"(*Regexp).FindAllStringIndex", Method, 0}, + {"(*Regexp).FindAllStringSubmatch", Method, 0}, + {"(*Regexp).FindAllStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindAllSubmatch", Method, 0}, + {"(*Regexp).FindAllSubmatchIndex", Method, 0}, + {"(*Regexp).FindIndex", Method, 0}, + {"(*Regexp).FindReaderIndex", Method, 0}, + {"(*Regexp).FindReaderSubmatchIndex", Method, 0}, + {"(*Regexp).FindString", Method, 0}, + {"(*Regexp).FindStringIndex", Method, 0}, + {"(*Regexp).FindStringSubmatch", Method, 0}, + {"(*Regexp).FindStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindSubmatch", Method, 0}, + {"(*Regexp).FindSubmatchIndex", Method, 0}, + {"(*Regexp).LiteralPrefix", Method, 0}, + {"(*Regexp).Longest", Method, 1}, + {"(*Regexp).MarshalText", Method, 21}, + {"(*Regexp).Match", Method, 0}, + {"(*Regexp).MatchReader", Method, 0}, + {"(*Regexp).MatchString", Method, 0}, + {"(*Regexp).NumSubexp", Method, 0}, + {"(*Regexp).ReplaceAll", Method, 0}, + {"(*Regexp).ReplaceAllFunc", Method, 0}, + {"(*Regexp).ReplaceAllLiteral", Method, 0}, + {"(*Regexp).ReplaceAllLiteralString", Method, 0}, + {"(*Regexp).ReplaceAllString", Method, 0}, + {"(*Regexp).ReplaceAllStringFunc", Method, 0}, + {"(*Regexp).Split", Method, 1}, + {"(*Regexp).String", Method, 0}, + {"(*Regexp).SubexpIndex", Method, 15}, + {"(*Regexp).SubexpNames", Method, 0}, + {"(*Regexp).UnmarshalText", Method, 21}, + {"Compile", Func, 0}, + {"CompilePOSIX", Func, 0}, + {"Match", Func, 0}, + {"MatchReader", Func, 0}, + {"MatchString", Func, 0}, + {"MustCompile", Func, 0}, + {"MustCompilePOSIX", Func, 0}, + {"QuoteMeta", Func, 0}, + {"Regexp", Type, 0}, + }, + "regexp/syntax": { + {"(*Error).Error", Method, 0}, + {"(*Inst).MatchEmptyWidth", Method, 0}, + {"(*Inst).MatchRune", Method, 0}, + {"(*Inst).MatchRunePos", Method, 3}, + {"(*Inst).String", Method, 0}, + {"(*Prog).Prefix", Method, 0}, + {"(*Prog).StartCond", Method, 0}, + {"(*Prog).String", Method, 0}, + {"(*Regexp).CapNames", Method, 0}, + {"(*Regexp).Equal", Method, 0}, + {"(*Regexp).MaxCap", Method, 0}, + {"(*Regexp).Simplify", Method, 0}, + {"(*Regexp).String", Method, 0}, + {"(ErrorCode).String", Method, 0}, + {"(InstOp).String", Method, 3}, + {"(Op).String", Method, 11}, + {"ClassNL", Const, 0}, + {"Compile", Func, 0}, + {"DotNL", Const, 0}, + {"EmptyBeginLine", Const, 0}, + {"EmptyBeginText", Const, 0}, + {"EmptyEndLine", Const, 0}, + {"EmptyEndText", Const, 0}, + {"EmptyNoWordBoundary", Const, 0}, + {"EmptyOp", Type, 0}, + {"EmptyOpContext", Func, 0}, + {"EmptyWordBoundary", Const, 0}, + {"ErrInternalError", Const, 0}, + {"ErrInvalidCharClass", Const, 0}, + {"ErrInvalidCharRange", Const, 0}, + {"ErrInvalidEscape", Const, 0}, + {"ErrInvalidNamedCapture", Const, 0}, + {"ErrInvalidPerlOp", Const, 0}, + {"ErrInvalidRepeatOp", Const, 0}, + {"ErrInvalidRepeatSize", Const, 0}, + {"ErrInvalidUTF8", Const, 0}, + {"ErrLarge", Const, 20}, + {"ErrMissingBracket", Const, 0}, + {"ErrMissingParen", Const, 0}, + {"ErrMissingRepeatArgument", Const, 0}, + {"ErrNestingDepth", Const, 19}, + {"ErrTrailingBackslash", Const, 0}, + {"ErrUnexpectedParen", Const, 1}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Expr", Field, 0}, + {"ErrorCode", Type, 0}, + {"Flags", Type, 0}, + {"FoldCase", Const, 0}, + {"Inst", Type, 0}, + {"Inst.Arg", Field, 0}, + {"Inst.Op", Field, 0}, + {"Inst.Out", Field, 0}, + {"Inst.Rune", Field, 0}, + {"InstAlt", Const, 0}, + {"InstAltMatch", Const, 0}, + {"InstCapture", Const, 0}, + {"InstEmptyWidth", Const, 0}, + {"InstFail", Const, 0}, + {"InstMatch", 
Const, 0}, + {"InstNop", Const, 0}, + {"InstOp", Type, 0}, + {"InstRune", Const, 0}, + {"InstRune1", Const, 0}, + {"InstRuneAny", Const, 0}, + {"InstRuneAnyNotNL", Const, 0}, + {"IsWordChar", Func, 0}, + {"Literal", Const, 0}, + {"MatchNL", Const, 0}, + {"NonGreedy", Const, 0}, + {"OneLine", Const, 0}, + {"Op", Type, 0}, + {"OpAlternate", Const, 0}, + {"OpAnyChar", Const, 0}, + {"OpAnyCharNotNL", Const, 0}, + {"OpBeginLine", Const, 0}, + {"OpBeginText", Const, 0}, + {"OpCapture", Const, 0}, + {"OpCharClass", Const, 0}, + {"OpConcat", Const, 0}, + {"OpEmptyMatch", Const, 0}, + {"OpEndLine", Const, 0}, + {"OpEndText", Const, 0}, + {"OpLiteral", Const, 0}, + {"OpNoMatch", Const, 0}, + {"OpNoWordBoundary", Const, 0}, + {"OpPlus", Const, 0}, + {"OpQuest", Const, 0}, + {"OpRepeat", Const, 0}, + {"OpStar", Const, 0}, + {"OpWordBoundary", Const, 0}, + {"POSIX", Const, 0}, + {"Parse", Func, 0}, + {"Perl", Const, 0}, + {"PerlX", Const, 0}, + {"Prog", Type, 0}, + {"Prog.Inst", Field, 0}, + {"Prog.NumCap", Field, 0}, + {"Prog.Start", Field, 0}, + {"Regexp", Type, 0}, + {"Regexp.Cap", Field, 0}, + {"Regexp.Flags", Field, 0}, + {"Regexp.Max", Field, 0}, + {"Regexp.Min", Field, 0}, + {"Regexp.Name", Field, 0}, + {"Regexp.Op", Field, 0}, + {"Regexp.Rune", Field, 0}, + {"Regexp.Rune0", Field, 0}, + {"Regexp.Sub", Field, 0}, + {"Regexp.Sub0", Field, 0}, + {"Simple", Const, 0}, + {"UnicodeGroups", Const, 0}, + {"WasDollar", Const, 0}, + }, + "runtime": { + {"(*BlockProfileRecord).Stack", Method, 1}, + {"(*Frames).Next", Method, 7}, + {"(*Func).Entry", Method, 0}, + {"(*Func).FileLine", Method, 0}, + {"(*Func).Name", Method, 0}, + {"(*MemProfileRecord).InUseBytes", Method, 0}, + {"(*MemProfileRecord).InUseObjects", Method, 0}, + {"(*MemProfileRecord).Stack", Method, 0}, + {"(*PanicNilError).Error", Method, 21}, + {"(*PanicNilError).RuntimeError", Method, 21}, + {"(*Pinner).Pin", Method, 21}, + {"(*Pinner).Unpin", Method, 21}, + {"(*StackRecord).Stack", Method, 0}, + {"(*TypeAssertionError).Error", Method, 0}, + {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"BlockProfile", Func, 1}, + {"BlockProfileRecord", Type, 1}, + {"BlockProfileRecord.Count", Field, 1}, + {"BlockProfileRecord.Cycles", Field, 1}, + {"BlockProfileRecord.StackRecord", Field, 1}, + {"Breakpoint", Func, 0}, + {"CPUProfile", Func, 0}, + {"Caller", Func, 0}, + {"Callers", Func, 0}, + {"CallersFrames", Func, 7}, + {"Compiler", Const, 0}, + {"Error", Type, 0}, + {"Frame", Type, 7}, + {"Frame.Entry", Field, 7}, + {"Frame.File", Field, 7}, + {"Frame.Func", Field, 7}, + {"Frame.Function", Field, 7}, + {"Frame.Line", Field, 7}, + {"Frame.PC", Field, 7}, + {"Frames", Type, 7}, + {"Func", Type, 0}, + {"FuncForPC", Func, 0}, + {"GC", Func, 0}, + {"GOARCH", Const, 0}, + {"GOMAXPROCS", Func, 0}, + {"GOOS", Const, 0}, + {"GOROOT", Func, 0}, + {"Goexit", Func, 0}, + {"GoroutineProfile", Func, 0}, + {"Gosched", Func, 0}, + {"KeepAlive", Func, 7}, + {"LockOSThread", Func, 0}, + {"MemProfile", Func, 0}, + {"MemProfileRate", Var, 0}, + {"MemProfileRecord", Type, 0}, + {"MemProfileRecord.AllocBytes", Field, 0}, + {"MemProfileRecord.AllocObjects", Field, 0}, + {"MemProfileRecord.FreeBytes", Field, 0}, + {"MemProfileRecord.FreeObjects", Field, 0}, + {"MemProfileRecord.Stack0", Field, 0}, + {"MemStats", Type, 0}, + {"MemStats.Alloc", Field, 0}, + {"MemStats.BuckHashSys", Field, 0}, + {"MemStats.BySize", Field, 0}, + {"MemStats.DebugGC", Field, 0}, + {"MemStats.EnableGC", Field, 0}, + {"MemStats.Frees", Field, 0}, + {"MemStats.GCCPUFraction", Field, 5}, + 
{"MemStats.GCSys", Field, 2}, + {"MemStats.HeapAlloc", Field, 0}, + {"MemStats.HeapIdle", Field, 0}, + {"MemStats.HeapInuse", Field, 0}, + {"MemStats.HeapObjects", Field, 0}, + {"MemStats.HeapReleased", Field, 0}, + {"MemStats.HeapSys", Field, 0}, + {"MemStats.LastGC", Field, 0}, + {"MemStats.Lookups", Field, 0}, + {"MemStats.MCacheInuse", Field, 0}, + {"MemStats.MCacheSys", Field, 0}, + {"MemStats.MSpanInuse", Field, 0}, + {"MemStats.MSpanSys", Field, 0}, + {"MemStats.Mallocs", Field, 0}, + {"MemStats.NextGC", Field, 0}, + {"MemStats.NumForcedGC", Field, 8}, + {"MemStats.NumGC", Field, 0}, + {"MemStats.OtherSys", Field, 2}, + {"MemStats.PauseEnd", Field, 4}, + {"MemStats.PauseNs", Field, 0}, + {"MemStats.PauseTotalNs", Field, 0}, + {"MemStats.StackInuse", Field, 0}, + {"MemStats.StackSys", Field, 0}, + {"MemStats.Sys", Field, 0}, + {"MemStats.TotalAlloc", Field, 0}, + {"MutexProfile", Func, 8}, + {"NumCPU", Func, 0}, + {"NumCgoCall", Func, 0}, + {"NumGoroutine", Func, 0}, + {"PanicNilError", Type, 21}, + {"Pinner", Type, 21}, + {"ReadMemStats", Func, 0}, + {"ReadTrace", Func, 5}, + {"SetBlockProfileRate", Func, 1}, + {"SetCPUProfileRate", Func, 0}, + {"SetCgoTraceback", Func, 7}, + {"SetFinalizer", Func, 0}, + {"SetMutexProfileFraction", Func, 8}, + {"Stack", Func, 0}, + {"StackRecord", Type, 0}, + {"StackRecord.Stack0", Field, 0}, + {"StartTrace", Func, 5}, + {"StopTrace", Func, 5}, + {"ThreadCreateProfile", Func, 0}, + {"TypeAssertionError", Type, 0}, + {"UnlockOSThread", Func, 0}, + {"Version", Func, 0}, + }, + "runtime/cgo": { + {"(Handle).Delete", Method, 17}, + {"(Handle).Value", Method, 17}, + {"Handle", Type, 17}, + {"Incomplete", Type, 20}, + {"NewHandle", Func, 17}, + }, + "runtime/coverage": { + {"ClearCounters", Func, 20}, + {"WriteCounters", Func, 20}, + {"WriteCountersDir", Func, 20}, + {"WriteMeta", Func, 20}, + {"WriteMetaDir", Func, 20}, + }, + "runtime/debug": { + {"(*BuildInfo).String", Method, 18}, + {"BuildInfo", Type, 12}, + {"BuildInfo.Deps", Field, 12}, + {"BuildInfo.GoVersion", Field, 18}, + {"BuildInfo.Main", Field, 12}, + {"BuildInfo.Path", Field, 12}, + {"BuildInfo.Settings", Field, 18}, + {"BuildSetting", Type, 18}, + {"BuildSetting.Key", Field, 18}, + {"BuildSetting.Value", Field, 18}, + {"FreeOSMemory", Func, 1}, + {"GCStats", Type, 1}, + {"GCStats.LastGC", Field, 1}, + {"GCStats.NumGC", Field, 1}, + {"GCStats.Pause", Field, 1}, + {"GCStats.PauseEnd", Field, 4}, + {"GCStats.PauseQuantiles", Field, 1}, + {"GCStats.PauseTotal", Field, 1}, + {"Module", Type, 12}, + {"Module.Path", Field, 12}, + {"Module.Replace", Field, 12}, + {"Module.Sum", Field, 12}, + {"Module.Version", Field, 12}, + {"ParseBuildInfo", Func, 18}, + {"PrintStack", Func, 0}, + {"ReadBuildInfo", Func, 12}, + {"ReadGCStats", Func, 1}, + {"SetGCPercent", Func, 1}, + {"SetMaxStack", Func, 2}, + {"SetMaxThreads", Func, 2}, + {"SetMemoryLimit", Func, 19}, + {"SetPanicOnFault", Func, 3}, + {"SetTraceback", Func, 6}, + {"Stack", Func, 0}, + {"WriteHeapDump", Func, 3}, + }, + "runtime/metrics": { + {"(Value).Float64", Method, 16}, + {"(Value).Float64Histogram", Method, 16}, + {"(Value).Kind", Method, 16}, + {"(Value).Uint64", Method, 16}, + {"All", Func, 16}, + {"Description", Type, 16}, + {"Description.Cumulative", Field, 16}, + {"Description.Description", Field, 16}, + {"Description.Kind", Field, 16}, + {"Description.Name", Field, 16}, + {"Float64Histogram", Type, 16}, + {"Float64Histogram.Buckets", Field, 16}, + {"Float64Histogram.Counts", Field, 16}, + {"KindBad", Const, 16}, + {"KindFloat64", 
Const, 16}, + {"KindFloat64Histogram", Const, 16}, + {"KindUint64", Const, 16}, + {"Read", Func, 16}, + {"Sample", Type, 16}, + {"Sample.Name", Field, 16}, + {"Sample.Value", Field, 16}, + {"Value", Type, 16}, + {"ValueKind", Type, 16}, + }, + "runtime/pprof": { + {"(*Profile).Add", Method, 0}, + {"(*Profile).Count", Method, 0}, + {"(*Profile).Name", Method, 0}, + {"(*Profile).Remove", Method, 0}, + {"(*Profile).WriteTo", Method, 0}, + {"Do", Func, 9}, + {"ForLabels", Func, 9}, + {"Label", Func, 9}, + {"LabelSet", Type, 9}, + {"Labels", Func, 9}, + {"Lookup", Func, 0}, + {"NewProfile", Func, 0}, + {"Profile", Type, 0}, + {"Profiles", Func, 0}, + {"SetGoroutineLabels", Func, 9}, + {"StartCPUProfile", Func, 0}, + {"StopCPUProfile", Func, 0}, + {"WithLabels", Func, 9}, + {"WriteHeapProfile", Func, 0}, + }, + "runtime/trace": { + {"(*Region).End", Method, 11}, + {"(*Task).End", Method, 11}, + {"IsEnabled", Func, 11}, + {"Log", Func, 11}, + {"Logf", Func, 11}, + {"NewTask", Func, 11}, + {"Region", Type, 11}, + {"Start", Func, 5}, + {"StartRegion", Func, 11}, + {"Stop", Func, 5}, + {"Task", Type, 11}, + {"WithRegion", Func, 11}, + }, + "slices": { + {"BinarySearch", Func, 21}, + {"BinarySearchFunc", Func, 21}, + {"Clip", Func, 21}, + {"Clone", Func, 21}, + {"Compact", Func, 21}, + {"CompactFunc", Func, 21}, + {"Compare", Func, 21}, + {"CompareFunc", Func, 21}, + {"Concat", Func, 22}, + {"Contains", Func, 21}, + {"ContainsFunc", Func, 21}, + {"Delete", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + {"Grow", Func, 21}, + {"Index", Func, 21}, + {"IndexFunc", Func, 21}, + {"Insert", Func, 21}, + {"IsSorted", Func, 21}, + {"IsSortedFunc", Func, 21}, + {"Max", Func, 21}, + {"MaxFunc", Func, 21}, + {"Min", Func, 21}, + {"MinFunc", Func, 21}, + {"Replace", Func, 21}, + {"Reverse", Func, 21}, + {"Sort", Func, 21}, + {"SortFunc", Func, 21}, + {"SortStableFunc", Func, 21}, + }, + "sort": { + {"(Float64Slice).Len", Method, 0}, + {"(Float64Slice).Less", Method, 0}, + {"(Float64Slice).Search", Method, 0}, + {"(Float64Slice).Sort", Method, 0}, + {"(Float64Slice).Swap", Method, 0}, + {"(IntSlice).Len", Method, 0}, + {"(IntSlice).Less", Method, 0}, + {"(IntSlice).Search", Method, 0}, + {"(IntSlice).Sort", Method, 0}, + {"(IntSlice).Swap", Method, 0}, + {"(StringSlice).Len", Method, 0}, + {"(StringSlice).Less", Method, 0}, + {"(StringSlice).Search", Method, 0}, + {"(StringSlice).Sort", Method, 0}, + {"(StringSlice).Swap", Method, 0}, + {"Find", Func, 19}, + {"Float64Slice", Type, 0}, + {"Float64s", Func, 0}, + {"Float64sAreSorted", Func, 0}, + {"IntSlice", Type, 0}, + {"Interface", Type, 0}, + {"Ints", Func, 0}, + {"IntsAreSorted", Func, 0}, + {"IsSorted", Func, 0}, + {"Reverse", Func, 1}, + {"Search", Func, 0}, + {"SearchFloat64s", Func, 0}, + {"SearchInts", Func, 0}, + {"SearchStrings", Func, 0}, + {"Slice", Func, 8}, + {"SliceIsSorted", Func, 8}, + {"SliceStable", Func, 8}, + {"Sort", Func, 0}, + {"Stable", Func, 2}, + {"StringSlice", Type, 0}, + {"Strings", Func, 0}, + {"StringsAreSorted", Func, 0}, + }, + "strconv": { + {"(*NumError).Error", Method, 0}, + {"(*NumError).Unwrap", Method, 14}, + {"AppendBool", Func, 0}, + {"AppendFloat", Func, 0}, + {"AppendInt", Func, 0}, + {"AppendQuote", Func, 0}, + {"AppendQuoteRune", Func, 0}, + {"AppendQuoteRuneToASCII", Func, 0}, + {"AppendQuoteRuneToGraphic", Func, 6}, + {"AppendQuoteToASCII", Func, 0}, + {"AppendQuoteToGraphic", Func, 6}, + {"AppendUint", Func, 0}, + {"Atoi", Func, 0}, + {"CanBackquote", Func, 0}, + 
{"ErrRange", Var, 0}, + {"ErrSyntax", Var, 0}, + {"FormatBool", Func, 0}, + {"FormatComplex", Func, 15}, + {"FormatFloat", Func, 0}, + {"FormatInt", Func, 0}, + {"FormatUint", Func, 0}, + {"IntSize", Const, 0}, + {"IsGraphic", Func, 6}, + {"IsPrint", Func, 0}, + {"Itoa", Func, 0}, + {"NumError", Type, 0}, + {"NumError.Err", Field, 0}, + {"NumError.Func", Field, 0}, + {"NumError.Num", Field, 0}, + {"ParseBool", Func, 0}, + {"ParseComplex", Func, 15}, + {"ParseFloat", Func, 0}, + {"ParseInt", Func, 0}, + {"ParseUint", Func, 0}, + {"Quote", Func, 0}, + {"QuoteRune", Func, 0}, + {"QuoteRuneToASCII", Func, 0}, + {"QuoteRuneToGraphic", Func, 6}, + {"QuoteToASCII", Func, 0}, + {"QuoteToGraphic", Func, 6}, + {"QuotedPrefix", Func, 17}, + {"Unquote", Func, 0}, + {"UnquoteChar", Func, 0}, + }, + "strings": { + {"(*Builder).Cap", Method, 12}, + {"(*Builder).Grow", Method, 10}, + {"(*Builder).Len", Method, 10}, + {"(*Builder).Reset", Method, 10}, + {"(*Builder).String", Method, 10}, + {"(*Builder).Write", Method, 10}, + {"(*Builder).WriteByte", Method, 10}, + {"(*Builder).WriteRune", Method, 10}, + {"(*Builder).WriteString", Method, 10}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Replacer).Replace", Method, 0}, + {"(*Replacer).WriteString", Method, 0}, + {"Builder", Type, 10}, + {"Clone", Func, 18}, + {"Compare", Func, 5}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 0}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 0}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"EqualFold", Func, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 2}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"NewReader", Func, 0}, + {"NewReplacer", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Replacer", Type, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "sync": { + {"(*Cond).Broadcast", Method, 0}, + {"(*Cond).Signal", Method, 0}, + {"(*Cond).Wait", Method, 0}, + {"(*Map).CompareAndDelete", Method, 20}, + {"(*Map).CompareAndSwap", Method, 20}, + {"(*Map).Delete", Method, 9}, + {"(*Map).Load", Method, 9}, + {"(*Map).LoadAndDelete", Method, 15}, + {"(*Map).LoadOrStore", Method, 9}, + {"(*Map).Range", Method, 9}, + {"(*Map).Store", Method, 9}, + {"(*Map).Swap", Method, 20}, + {"(*Mutex).Lock", Method, 0}, + {"(*Mutex).TryLock", Method, 18}, + {"(*Mutex).Unlock", 
Method, 0}, + {"(*Once).Do", Method, 0}, + {"(*Pool).Get", Method, 3}, + {"(*Pool).Put", Method, 3}, + {"(*RWMutex).Lock", Method, 0}, + {"(*RWMutex).RLock", Method, 0}, + {"(*RWMutex).RLocker", Method, 0}, + {"(*RWMutex).RUnlock", Method, 0}, + {"(*RWMutex).TryLock", Method, 18}, + {"(*RWMutex).TryRLock", Method, 18}, + {"(*RWMutex).Unlock", Method, 0}, + {"(*WaitGroup).Add", Method, 0}, + {"(*WaitGroup).Done", Method, 0}, + {"(*WaitGroup).Wait", Method, 0}, + {"Cond", Type, 0}, + {"Cond.L", Field, 0}, + {"Locker", Type, 0}, + {"Map", Type, 9}, + {"Mutex", Type, 0}, + {"NewCond", Func, 0}, + {"Once", Type, 0}, + {"OnceFunc", Func, 21}, + {"OnceValue", Func, 21}, + {"OnceValues", Func, 21}, + {"Pool", Type, 3}, + {"Pool.New", Field, 3}, + {"RWMutex", Type, 0}, + {"WaitGroup", Type, 0}, + }, + "sync/atomic": { + {"(*Bool).CompareAndSwap", Method, 19}, + {"(*Bool).Load", Method, 19}, + {"(*Bool).Store", Method, 19}, + {"(*Bool).Swap", Method, 19}, + {"(*Int32).Add", Method, 19}, + {"(*Int32).CompareAndSwap", Method, 19}, + {"(*Int32).Load", Method, 19}, + {"(*Int32).Store", Method, 19}, + {"(*Int32).Swap", Method, 19}, + {"(*Int64).Add", Method, 19}, + {"(*Int64).CompareAndSwap", Method, 19}, + {"(*Int64).Load", Method, 19}, + {"(*Int64).Store", Method, 19}, + {"(*Int64).Swap", Method, 19}, + {"(*Pointer).CompareAndSwap", Method, 19}, + {"(*Pointer).Load", Method, 19}, + {"(*Pointer).Store", Method, 19}, + {"(*Pointer).Swap", Method, 19}, + {"(*Uint32).Add", Method, 19}, + {"(*Uint32).CompareAndSwap", Method, 19}, + {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Store", Method, 19}, + {"(*Uint32).Swap", Method, 19}, + {"(*Uint64).Add", Method, 19}, + {"(*Uint64).CompareAndSwap", Method, 19}, + {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Store", Method, 19}, + {"(*Uint64).Swap", Method, 19}, + {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).CompareAndSwap", Method, 19}, + {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Store", Method, 19}, + {"(*Uintptr).Swap", Method, 19}, + {"(*Value).CompareAndSwap", Method, 17}, + {"(*Value).Load", Method, 4}, + {"(*Value).Store", Method, 4}, + {"(*Value).Swap", Method, 17}, + {"AddInt32", Func, 0}, + {"AddInt64", Func, 0}, + {"AddUint32", Func, 0}, + {"AddUint64", Func, 0}, + {"AddUintptr", Func, 0}, + {"Bool", Type, 19}, + {"CompareAndSwapInt32", Func, 0}, + {"CompareAndSwapInt64", Func, 0}, + {"CompareAndSwapPointer", Func, 0}, + {"CompareAndSwapUint32", Func, 0}, + {"CompareAndSwapUint64", Func, 0}, + {"CompareAndSwapUintptr", Func, 0}, + {"Int32", Type, 19}, + {"Int64", Type, 19}, + {"LoadInt32", Func, 0}, + {"LoadInt64", Func, 0}, + {"LoadPointer", Func, 0}, + {"LoadUint32", Func, 0}, + {"LoadUint64", Func, 0}, + {"LoadUintptr", Func, 0}, + {"Pointer", Type, 19}, + {"StoreInt32", Func, 0}, + {"StoreInt64", Func, 0}, + {"StorePointer", Func, 0}, + {"StoreUint32", Func, 0}, + {"StoreUint64", Func, 0}, + {"StoreUintptr", Func, 0}, + {"SwapInt32", Func, 2}, + {"SwapInt64", Func, 2}, + {"SwapPointer", Func, 2}, + {"SwapUint32", Func, 2}, + {"SwapUint64", Func, 2}, + {"SwapUintptr", Func, 2}, + {"Uint32", Type, 19}, + {"Uint64", Type, 19}, + {"Uintptr", Type, 19}, + {"Value", Type, 4}, + }, + "syscall": { + {"(*Cmsghdr).SetLen", Method, 0}, + {"(*DLL).FindProc", Method, 0}, + {"(*DLL).MustFindProc", Method, 0}, + {"(*DLL).Release", Method, 0}, + {"(*DLLError).Error", Method, 0}, + {"(*DLLError).Unwrap", Method, 16}, + {"(*Filetime).Nanoseconds", Method, 0}, + {"(*Iovec).SetLen", Method, 0}, + {"(*LazyDLL).Handle", Method, 0}, + {"(*LazyDLL).Load", Method, 
0}, + {"(*LazyDLL).NewProc", Method, 0}, + {"(*LazyProc).Addr", Method, 0}, + {"(*LazyProc).Call", Method, 0}, + {"(*LazyProc).Find", Method, 0}, + {"(*Msghdr).SetControllen", Method, 0}, + {"(*Proc).Addr", Method, 0}, + {"(*Proc).Call", Method, 0}, + {"(*PtraceRegs).PC", Method, 0}, + {"(*PtraceRegs).SetPC", Method, 0}, + {"(*RawSockaddrAny).Sockaddr", Method, 0}, + {"(*SID).Copy", Method, 0}, + {"(*SID).Len", Method, 0}, + {"(*SID).LookupAccount", Method, 0}, + {"(*SID).String", Method, 0}, + {"(*Timespec).Nano", Method, 0}, + {"(*Timespec).Unix", Method, 0}, + {"(*Timeval).Nano", Method, 0}, + {"(*Timeval).Nanoseconds", Method, 0}, + {"(*Timeval).Unix", Method, 0}, + {"(Errno).Error", Method, 0}, + {"(Errno).Is", Method, 13}, + {"(Errno).Temporary", Method, 0}, + {"(Errno).Timeout", Method, 0}, + {"(Signal).Signal", Method, 0}, + {"(Signal).String", Method, 0}, + {"(Token).Close", Method, 0}, + {"(Token).GetTokenPrimaryGroup", Method, 0}, + {"(Token).GetTokenUser", Method, 0}, + {"(Token).GetUserProfileDirectory", Method, 0}, + {"(WaitStatus).Continued", Method, 0}, + {"(WaitStatus).CoreDump", Method, 0}, + {"(WaitStatus).ExitStatus", Method, 0}, + {"(WaitStatus).Exited", Method, 0}, + {"(WaitStatus).Signal", Method, 0}, + {"(WaitStatus).Signaled", Method, 0}, + {"(WaitStatus).StopSignal", Method, 0}, + {"(WaitStatus).Stopped", Method, 0}, + {"(WaitStatus).TrapCause", Method, 0}, + {"AF_ALG", Const, 0}, + {"AF_APPLETALK", Const, 0}, + {"AF_ARP", Const, 0}, + {"AF_ASH", Const, 0}, + {"AF_ATM", Const, 0}, + {"AF_ATMPVC", Const, 0}, + {"AF_ATMSVC", Const, 0}, + {"AF_AX25", Const, 0}, + {"AF_BLUETOOTH", Const, 0}, + {"AF_BRIDGE", Const, 0}, + {"AF_CAIF", Const, 0}, + {"AF_CAN", Const, 0}, + {"AF_CCITT", Const, 0}, + {"AF_CHAOS", Const, 0}, + {"AF_CNT", Const, 0}, + {"AF_COIP", Const, 0}, + {"AF_DATAKIT", Const, 0}, + {"AF_DECnet", Const, 0}, + {"AF_DLI", Const, 0}, + {"AF_E164", Const, 0}, + {"AF_ECMA", Const, 0}, + {"AF_ECONET", Const, 0}, + {"AF_ENCAP", Const, 1}, + {"AF_FILE", Const, 0}, + {"AF_HYLINK", Const, 0}, + {"AF_IEEE80211", Const, 0}, + {"AF_IEEE802154", Const, 0}, + {"AF_IMPLINK", Const, 0}, + {"AF_INET", Const, 0}, + {"AF_INET6", Const, 0}, + {"AF_INET6_SDP", Const, 3}, + {"AF_INET_SDP", Const, 3}, + {"AF_IPX", Const, 0}, + {"AF_IRDA", Const, 0}, + {"AF_ISDN", Const, 0}, + {"AF_ISO", Const, 0}, + {"AF_IUCV", Const, 0}, + {"AF_KEY", Const, 0}, + {"AF_LAT", Const, 0}, + {"AF_LINK", Const, 0}, + {"AF_LLC", Const, 0}, + {"AF_LOCAL", Const, 0}, + {"AF_MAX", Const, 0}, + {"AF_MPLS", Const, 1}, + {"AF_NATM", Const, 0}, + {"AF_NDRV", Const, 0}, + {"AF_NETBEUI", Const, 0}, + {"AF_NETBIOS", Const, 0}, + {"AF_NETGRAPH", Const, 0}, + {"AF_NETLINK", Const, 0}, + {"AF_NETROM", Const, 0}, + {"AF_NS", Const, 0}, + {"AF_OROUTE", Const, 1}, + {"AF_OSI", Const, 0}, + {"AF_PACKET", Const, 0}, + {"AF_PHONET", Const, 0}, + {"AF_PPP", Const, 0}, + {"AF_PPPOX", Const, 0}, + {"AF_PUP", Const, 0}, + {"AF_RDS", Const, 0}, + {"AF_RESERVED_36", Const, 0}, + {"AF_ROSE", Const, 0}, + {"AF_ROUTE", Const, 0}, + {"AF_RXRPC", Const, 0}, + {"AF_SCLUSTER", Const, 0}, + {"AF_SECURITY", Const, 0}, + {"AF_SIP", Const, 0}, + {"AF_SLOW", Const, 0}, + {"AF_SNA", Const, 0}, + {"AF_SYSTEM", Const, 0}, + {"AF_TIPC", Const, 0}, + {"AF_UNIX", Const, 0}, + {"AF_UNSPEC", Const, 0}, + {"AF_UTUN", Const, 16}, + {"AF_VENDOR00", Const, 0}, + {"AF_VENDOR01", Const, 0}, + {"AF_VENDOR02", Const, 0}, + {"AF_VENDOR03", Const, 0}, + {"AF_VENDOR04", Const, 0}, + {"AF_VENDOR05", Const, 0}, + {"AF_VENDOR06", Const, 0}, + {"AF_VENDOR07", 
Const, 0}, + {"AF_VENDOR08", Const, 0}, + {"AF_VENDOR09", Const, 0}, + {"AF_VENDOR10", Const, 0}, + {"AF_VENDOR11", Const, 0}, + {"AF_VENDOR12", Const, 0}, + {"AF_VENDOR13", Const, 0}, + {"AF_VENDOR14", Const, 0}, + {"AF_VENDOR15", Const, 0}, + {"AF_VENDOR16", Const, 0}, + {"AF_VENDOR17", Const, 0}, + {"AF_VENDOR18", Const, 0}, + {"AF_VENDOR19", Const, 0}, + {"AF_VENDOR20", Const, 0}, + {"AF_VENDOR21", Const, 0}, + {"AF_VENDOR22", Const, 0}, + {"AF_VENDOR23", Const, 0}, + {"AF_VENDOR24", Const, 0}, + {"AF_VENDOR25", Const, 0}, + {"AF_VENDOR26", Const, 0}, + {"AF_VENDOR27", Const, 0}, + {"AF_VENDOR28", Const, 0}, + {"AF_VENDOR29", Const, 0}, + {"AF_VENDOR30", Const, 0}, + {"AF_VENDOR31", Const, 0}, + {"AF_VENDOR32", Const, 0}, + {"AF_VENDOR33", Const, 0}, + {"AF_VENDOR34", Const, 0}, + {"AF_VENDOR35", Const, 0}, + {"AF_VENDOR36", Const, 0}, + {"AF_VENDOR37", Const, 0}, + {"AF_VENDOR38", Const, 0}, + {"AF_VENDOR39", Const, 0}, + {"AF_VENDOR40", Const, 0}, + {"AF_VENDOR41", Const, 0}, + {"AF_VENDOR42", Const, 0}, + {"AF_VENDOR43", Const, 0}, + {"AF_VENDOR44", Const, 0}, + {"AF_VENDOR45", Const, 0}, + {"AF_VENDOR46", Const, 0}, + {"AF_VENDOR47", Const, 0}, + {"AF_WANPIPE", Const, 0}, + {"AF_X25", Const, 0}, + {"AI_CANONNAME", Const, 1}, + {"AI_NUMERICHOST", Const, 1}, + {"AI_PASSIVE", Const, 1}, + {"APPLICATION_ERROR", Const, 0}, + {"ARPHRD_ADAPT", Const, 0}, + {"ARPHRD_APPLETLK", Const, 0}, + {"ARPHRD_ARCNET", Const, 0}, + {"ARPHRD_ASH", Const, 0}, + {"ARPHRD_ATM", Const, 0}, + {"ARPHRD_AX25", Const, 0}, + {"ARPHRD_BIF", Const, 0}, + {"ARPHRD_CHAOS", Const, 0}, + {"ARPHRD_CISCO", Const, 0}, + {"ARPHRD_CSLIP", Const, 0}, + {"ARPHRD_CSLIP6", Const, 0}, + {"ARPHRD_DDCMP", Const, 0}, + {"ARPHRD_DLCI", Const, 0}, + {"ARPHRD_ECONET", Const, 0}, + {"ARPHRD_EETHER", Const, 0}, + {"ARPHRD_ETHER", Const, 0}, + {"ARPHRD_EUI64", Const, 0}, + {"ARPHRD_FCAL", Const, 0}, + {"ARPHRD_FCFABRIC", Const, 0}, + {"ARPHRD_FCPL", Const, 0}, + {"ARPHRD_FCPP", Const, 0}, + {"ARPHRD_FDDI", Const, 0}, + {"ARPHRD_FRAD", Const, 0}, + {"ARPHRD_FRELAY", Const, 1}, + {"ARPHRD_HDLC", Const, 0}, + {"ARPHRD_HIPPI", Const, 0}, + {"ARPHRD_HWX25", Const, 0}, + {"ARPHRD_IEEE1394", Const, 0}, + {"ARPHRD_IEEE802", Const, 0}, + {"ARPHRD_IEEE80211", Const, 0}, + {"ARPHRD_IEEE80211_PRISM", Const, 0}, + {"ARPHRD_IEEE80211_RADIOTAP", Const, 0}, + {"ARPHRD_IEEE802154", Const, 0}, + {"ARPHRD_IEEE802154_PHY", Const, 0}, + {"ARPHRD_IEEE802_TR", Const, 0}, + {"ARPHRD_INFINIBAND", Const, 0}, + {"ARPHRD_IPDDP", Const, 0}, + {"ARPHRD_IPGRE", Const, 0}, + {"ARPHRD_IRDA", Const, 0}, + {"ARPHRD_LAPB", Const, 0}, + {"ARPHRD_LOCALTLK", Const, 0}, + {"ARPHRD_LOOPBACK", Const, 0}, + {"ARPHRD_METRICOM", Const, 0}, + {"ARPHRD_NETROM", Const, 0}, + {"ARPHRD_NONE", Const, 0}, + {"ARPHRD_PIMREG", Const, 0}, + {"ARPHRD_PPP", Const, 0}, + {"ARPHRD_PRONET", Const, 0}, + {"ARPHRD_RAWHDLC", Const, 0}, + {"ARPHRD_ROSE", Const, 0}, + {"ARPHRD_RSRVD", Const, 0}, + {"ARPHRD_SIT", Const, 0}, + {"ARPHRD_SKIP", Const, 0}, + {"ARPHRD_SLIP", Const, 0}, + {"ARPHRD_SLIP6", Const, 0}, + {"ARPHRD_STRIP", Const, 1}, + {"ARPHRD_TUNNEL", Const, 0}, + {"ARPHRD_TUNNEL6", Const, 0}, + {"ARPHRD_VOID", Const, 0}, + {"ARPHRD_X25", Const, 0}, + {"AUTHTYPE_CLIENT", Const, 0}, + {"AUTHTYPE_SERVER", Const, 0}, + {"Accept", Func, 0}, + {"Accept4", Func, 1}, + {"AcceptEx", Func, 0}, + {"Access", Func, 0}, + {"Acct", Func, 0}, + {"AddrinfoW", Type, 1}, + {"AddrinfoW.Addr", Field, 1}, + {"AddrinfoW.Addrlen", Field, 1}, + {"AddrinfoW.Canonname", Field, 1}, + {"AddrinfoW.Family", Field, 1}, + 
{"AddrinfoW.Flags", Field, 1}, + {"AddrinfoW.Next", Field, 1}, + {"AddrinfoW.Protocol", Field, 1}, + {"AddrinfoW.Socktype", Field, 1}, + {"Adjtime", Func, 0}, + {"Adjtimex", Func, 0}, + {"AllThreadsSyscall", Func, 16}, + {"AllThreadsSyscall6", Func, 16}, + {"AttachLsf", Func, 0}, + {"B0", Const, 0}, + {"B1000000", Const, 0}, + {"B110", Const, 0}, + {"B115200", Const, 0}, + {"B1152000", Const, 0}, + {"B1200", Const, 0}, + {"B134", Const, 0}, + {"B14400", Const, 1}, + {"B150", Const, 0}, + {"B1500000", Const, 0}, + {"B1800", Const, 0}, + {"B19200", Const, 0}, + {"B200", Const, 0}, + {"B2000000", Const, 0}, + {"B230400", Const, 0}, + {"B2400", Const, 0}, + {"B2500000", Const, 0}, + {"B28800", Const, 1}, + {"B300", Const, 0}, + {"B3000000", Const, 0}, + {"B3500000", Const, 0}, + {"B38400", Const, 0}, + {"B4000000", Const, 0}, + {"B460800", Const, 0}, + {"B4800", Const, 0}, + {"B50", Const, 0}, + {"B500000", Const, 0}, + {"B57600", Const, 0}, + {"B576000", Const, 0}, + {"B600", Const, 0}, + {"B7200", Const, 1}, + {"B75", Const, 0}, + {"B76800", Const, 1}, + {"B921600", Const, 0}, + {"B9600", Const, 0}, + {"BASE_PROTOCOL", Const, 2}, + {"BIOCFEEDBACK", Const, 0}, + {"BIOCFLUSH", Const, 0}, + {"BIOCGBLEN", Const, 0}, + {"BIOCGDIRECTION", Const, 0}, + {"BIOCGDIRFILT", Const, 1}, + {"BIOCGDLT", Const, 0}, + {"BIOCGDLTLIST", Const, 0}, + {"BIOCGETBUFMODE", Const, 0}, + {"BIOCGETIF", Const, 0}, + {"BIOCGETZMAX", Const, 0}, + {"BIOCGFEEDBACK", Const, 1}, + {"BIOCGFILDROP", Const, 1}, + {"BIOCGHDRCMPLT", Const, 0}, + {"BIOCGRSIG", Const, 0}, + {"BIOCGRTIMEOUT", Const, 0}, + {"BIOCGSEESENT", Const, 0}, + {"BIOCGSTATS", Const, 0}, + {"BIOCGSTATSOLD", Const, 1}, + {"BIOCGTSTAMP", Const, 1}, + {"BIOCIMMEDIATE", Const, 0}, + {"BIOCLOCK", Const, 0}, + {"BIOCPROMISC", Const, 0}, + {"BIOCROTZBUF", Const, 0}, + {"BIOCSBLEN", Const, 0}, + {"BIOCSDIRECTION", Const, 0}, + {"BIOCSDIRFILT", Const, 1}, + {"BIOCSDLT", Const, 0}, + {"BIOCSETBUFMODE", Const, 0}, + {"BIOCSETF", Const, 0}, + {"BIOCSETFNR", Const, 0}, + {"BIOCSETIF", Const, 0}, + {"BIOCSETWF", Const, 0}, + {"BIOCSETZBUF", Const, 0}, + {"BIOCSFEEDBACK", Const, 1}, + {"BIOCSFILDROP", Const, 1}, + {"BIOCSHDRCMPLT", Const, 0}, + {"BIOCSRSIG", Const, 0}, + {"BIOCSRTIMEOUT", Const, 0}, + {"BIOCSSEESENT", Const, 0}, + {"BIOCSTCPF", Const, 1}, + {"BIOCSTSTAMP", Const, 1}, + {"BIOCSUDPF", Const, 1}, + {"BIOCVERSION", Const, 0}, + {"BPF_A", Const, 0}, + {"BPF_ABS", Const, 0}, + {"BPF_ADD", Const, 0}, + {"BPF_ALIGNMENT", Const, 0}, + {"BPF_ALIGNMENT32", Const, 1}, + {"BPF_ALU", Const, 0}, + {"BPF_AND", Const, 0}, + {"BPF_B", Const, 0}, + {"BPF_BUFMODE_BUFFER", Const, 0}, + {"BPF_BUFMODE_ZBUF", Const, 0}, + {"BPF_DFLTBUFSIZE", Const, 1}, + {"BPF_DIRECTION_IN", Const, 1}, + {"BPF_DIRECTION_OUT", Const, 1}, + {"BPF_DIV", Const, 0}, + {"BPF_H", Const, 0}, + {"BPF_IMM", Const, 0}, + {"BPF_IND", Const, 0}, + {"BPF_JA", Const, 0}, + {"BPF_JEQ", Const, 0}, + {"BPF_JGE", Const, 0}, + {"BPF_JGT", Const, 0}, + {"BPF_JMP", Const, 0}, + {"BPF_JSET", Const, 0}, + {"BPF_K", Const, 0}, + {"BPF_LD", Const, 0}, + {"BPF_LDX", Const, 0}, + {"BPF_LEN", Const, 0}, + {"BPF_LSH", Const, 0}, + {"BPF_MAJOR_VERSION", Const, 0}, + {"BPF_MAXBUFSIZE", Const, 0}, + {"BPF_MAXINSNS", Const, 0}, + {"BPF_MEM", Const, 0}, + {"BPF_MEMWORDS", Const, 0}, + {"BPF_MINBUFSIZE", Const, 0}, + {"BPF_MINOR_VERSION", Const, 0}, + {"BPF_MISC", Const, 0}, + {"BPF_MSH", Const, 0}, + {"BPF_MUL", Const, 0}, + {"BPF_NEG", Const, 0}, + {"BPF_OR", Const, 0}, + {"BPF_RELEASE", Const, 0}, + {"BPF_RET", Const, 0}, + 
{"BPF_RSH", Const, 0}, + {"BPF_ST", Const, 0}, + {"BPF_STX", Const, 0}, + {"BPF_SUB", Const, 0}, + {"BPF_TAX", Const, 0}, + {"BPF_TXA", Const, 0}, + {"BPF_T_BINTIME", Const, 1}, + {"BPF_T_BINTIME_FAST", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_FAST", Const, 1}, + {"BPF_T_FLAG_MASK", Const, 1}, + {"BPF_T_FORMAT_MASK", Const, 1}, + {"BPF_T_MICROTIME", Const, 1}, + {"BPF_T_MICROTIME_FAST", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_MONOTONIC", Const, 1}, + {"BPF_T_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NANOTIME", Const, 1}, + {"BPF_T_NANOTIME_FAST", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NONE", Const, 1}, + {"BPF_T_NORMAL", Const, 1}, + {"BPF_W", Const, 0}, + {"BPF_X", Const, 0}, + {"BRKINT", Const, 0}, + {"Bind", Func, 0}, + {"BindToDevice", Func, 0}, + {"BpfBuflen", Func, 0}, + {"BpfDatalink", Func, 0}, + {"BpfHdr", Type, 0}, + {"BpfHdr.Caplen", Field, 0}, + {"BpfHdr.Datalen", Field, 0}, + {"BpfHdr.Hdrlen", Field, 0}, + {"BpfHdr.Pad_cgo_0", Field, 0}, + {"BpfHdr.Tstamp", Field, 0}, + {"BpfHeadercmpl", Func, 0}, + {"BpfInsn", Type, 0}, + {"BpfInsn.Code", Field, 0}, + {"BpfInsn.Jf", Field, 0}, + {"BpfInsn.Jt", Field, 0}, + {"BpfInsn.K", Field, 0}, + {"BpfInterface", Func, 0}, + {"BpfJump", Func, 0}, + {"BpfProgram", Type, 0}, + {"BpfProgram.Insns", Field, 0}, + {"BpfProgram.Len", Field, 0}, + {"BpfProgram.Pad_cgo_0", Field, 0}, + {"BpfStat", Type, 0}, + {"BpfStat.Capt", Field, 2}, + {"BpfStat.Drop", Field, 0}, + {"BpfStat.Padding", Field, 2}, + {"BpfStat.Recv", Field, 0}, + {"BpfStats", Func, 0}, + {"BpfStmt", Func, 0}, + {"BpfTimeout", Func, 0}, + {"BpfTimeval", Type, 2}, + {"BpfTimeval.Sec", Field, 2}, + {"BpfTimeval.Usec", Field, 2}, + {"BpfVersion", Type, 0}, + {"BpfVersion.Major", Field, 0}, + {"BpfVersion.Minor", Field, 0}, + {"BpfZbuf", Type, 0}, + {"BpfZbuf.Bufa", Field, 0}, + {"BpfZbuf.Bufb", Field, 0}, + {"BpfZbuf.Buflen", Field, 0}, + {"BpfZbufHeader", Type, 0}, + {"BpfZbufHeader.Kernel_gen", Field, 0}, + {"BpfZbufHeader.Kernel_len", Field, 0}, + {"BpfZbufHeader.User_gen", Field, 0}, + {"BpfZbufHeader.X_bzh_pad", Field, 0}, + {"ByHandleFileInformation", Type, 0}, + {"ByHandleFileInformation.CreationTime", Field, 0}, + {"ByHandleFileInformation.FileAttributes", Field, 0}, + {"ByHandleFileInformation.FileIndexHigh", Field, 0}, + {"ByHandleFileInformation.FileIndexLow", Field, 0}, + {"ByHandleFileInformation.FileSizeHigh", Field, 0}, + {"ByHandleFileInformation.FileSizeLow", Field, 0}, + {"ByHandleFileInformation.LastAccessTime", Field, 0}, + {"ByHandleFileInformation.LastWriteTime", Field, 0}, + {"ByHandleFileInformation.NumberOfLinks", Field, 0}, + {"ByHandleFileInformation.VolumeSerialNumber", Field, 0}, + {"BytePtrFromString", Func, 1}, + {"ByteSliceFromString", Func, 1}, + {"CCR0_FLUSH", Const, 1}, + {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0}, + {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0}, + {"CERT_CHAIN_POLICY_BASE", Const, 0}, + {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_CHAIN_POLICY_EV", Const, 0}, + {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0}, + {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0}, + {"CERT_CHAIN_POLICY_SSL", Const, 0}, + {"CERT_E_CN_NO_MATCH", Const, 0}, + {"CERT_E_EXPIRED", Const, 0}, + {"CERT_E_PURPOSE", Const, 0}, + {"CERT_E_ROLE", Const, 0}, + {"CERT_E_UNTRUSTEDROOT", Const, 0}, + {"CERT_STORE_ADD_ALWAYS", Const, 0}, + 
{"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0}, + {"CERT_STORE_PROV_MEMORY", Const, 0}, + {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_EXTENSION", Const, 0}, + {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_IS_CYCLIC", Const, 0}, + {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0}, + {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0}, + {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0}, + {"CERT_TRUST_IS_REVOKED", Const, 0}, + {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0}, + {"CERT_TRUST_NO_ERROR", Const, 0}, + {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0}, + {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0}, + {"CFLUSH", Const, 1}, + {"CLOCAL", Const, 0}, + {"CLONE_CHILD_CLEARTID", Const, 2}, + {"CLONE_CHILD_SETTID", Const, 2}, + {"CLONE_CLEAR_SIGHAND", Const, 20}, + {"CLONE_CSIGNAL", Const, 3}, + {"CLONE_DETACHED", Const, 2}, + {"CLONE_FILES", Const, 2}, + {"CLONE_FS", Const, 2}, + {"CLONE_INTO_CGROUP", Const, 20}, + {"CLONE_IO", Const, 2}, + {"CLONE_NEWCGROUP", Const, 20}, + {"CLONE_NEWIPC", Const, 2}, + {"CLONE_NEWNET", Const, 2}, + {"CLONE_NEWNS", Const, 2}, + {"CLONE_NEWPID", Const, 2}, + {"CLONE_NEWTIME", Const, 20}, + {"CLONE_NEWUSER", Const, 2}, + {"CLONE_NEWUTS", Const, 2}, + {"CLONE_PARENT", Const, 2}, + {"CLONE_PARENT_SETTID", Const, 2}, + {"CLONE_PID", Const, 3}, + {"CLONE_PIDFD", Const, 20}, + {"CLONE_PTRACE", Const, 2}, + {"CLONE_SETTLS", Const, 2}, + {"CLONE_SIGHAND", Const, 2}, + {"CLONE_SYSVSEM", Const, 2}, + {"CLONE_THREAD", Const, 2}, + {"CLONE_UNTRACED", Const, 2}, + {"CLONE_VFORK", Const, 2}, + {"CLONE_VM", Const, 2}, + {"CPUID_CFLUSH", Const, 1}, + {"CREAD", Const, 0}, + {"CREATE_ALWAYS", Const, 0}, + {"CREATE_NEW", Const, 0}, + {"CREATE_NEW_PROCESS_GROUP", Const, 1}, + {"CREATE_UNICODE_ENVIRONMENT", Const, 0}, + {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0}, + {"CRYPT_DELETEKEYSET", Const, 0}, + {"CRYPT_MACHINE_KEYSET", Const, 0}, + {"CRYPT_NEWKEYSET", Const, 0}, + {"CRYPT_SILENT", Const, 0}, + {"CRYPT_VERIFYCONTEXT", Const, 0}, + {"CS5", Const, 0}, + {"CS6", Const, 0}, + {"CS7", Const, 0}, + {"CS8", Const, 0}, + {"CSIZE", Const, 0}, + {"CSTART", Const, 1}, + {"CSTATUS", Const, 1}, + {"CSTOP", Const, 1}, + {"CSTOPB", Const, 0}, + {"CSUSP", Const, 1}, + {"CTL_MAXNAME", Const, 0}, + {"CTL_NET", Const, 0}, + {"CTL_QUERY", Const, 1}, + {"CTRL_BREAK_EVENT", Const, 1}, + {"CTRL_CLOSE_EVENT", Const, 14}, + {"CTRL_C_EVENT", Const, 1}, + {"CTRL_LOGOFF_EVENT", Const, 14}, + {"CTRL_SHUTDOWN_EVENT", Const, 14}, + {"CancelIo", Func, 0}, + {"CancelIoEx", Func, 1}, + {"CertAddCertificateContextToStore", Func, 0}, + {"CertChainContext", Type, 0}, + {"CertChainContext.ChainCount", Field, 0}, + {"CertChainContext.Chains", Field, 0}, + {"CertChainContext.HasRevocationFreshnessTime", Field, 0}, + {"CertChainContext.LowerQualityChainCount", Field, 0}, + {"CertChainContext.LowerQualityChains", Field, 0}, + {"CertChainContext.RevocationFreshnessTime", Field, 0}, + {"CertChainContext.Size", Field, 0}, + {"CertChainContext.TrustStatus", Field, 0}, + {"CertChainElement", Type, 0}, + 
{"CertChainElement.ApplicationUsage", Field, 0}, + {"CertChainElement.CertContext", Field, 0}, + {"CertChainElement.ExtendedErrorInfo", Field, 0}, + {"CertChainElement.IssuanceUsage", Field, 0}, + {"CertChainElement.RevocationInfo", Field, 0}, + {"CertChainElement.Size", Field, 0}, + {"CertChainElement.TrustStatus", Field, 0}, + {"CertChainPara", Type, 0}, + {"CertChainPara.CacheResync", Field, 0}, + {"CertChainPara.CheckRevocationFreshnessTime", Field, 0}, + {"CertChainPara.RequestedUsage", Field, 0}, + {"CertChainPara.RequstedIssuancePolicy", Field, 0}, + {"CertChainPara.RevocationFreshnessTime", Field, 0}, + {"CertChainPara.Size", Field, 0}, + {"CertChainPara.URLRetrievalTimeout", Field, 0}, + {"CertChainPolicyPara", Type, 0}, + {"CertChainPolicyPara.ExtraPolicyPara", Field, 0}, + {"CertChainPolicyPara.Flags", Field, 0}, + {"CertChainPolicyPara.Size", Field, 0}, + {"CertChainPolicyStatus", Type, 0}, + {"CertChainPolicyStatus.ChainIndex", Field, 0}, + {"CertChainPolicyStatus.ElementIndex", Field, 0}, + {"CertChainPolicyStatus.Error", Field, 0}, + {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0}, + {"CertChainPolicyStatus.Size", Field, 0}, + {"CertCloseStore", Func, 0}, + {"CertContext", Type, 0}, + {"CertContext.CertInfo", Field, 0}, + {"CertContext.EncodedCert", Field, 0}, + {"CertContext.EncodingType", Field, 0}, + {"CertContext.Length", Field, 0}, + {"CertContext.Store", Field, 0}, + {"CertCreateCertificateContext", Func, 0}, + {"CertEnhKeyUsage", Type, 0}, + {"CertEnhKeyUsage.Length", Field, 0}, + {"CertEnhKeyUsage.UsageIdentifiers", Field, 0}, + {"CertEnumCertificatesInStore", Func, 0}, + {"CertFreeCertificateChain", Func, 0}, + {"CertFreeCertificateContext", Func, 0}, + {"CertGetCertificateChain", Func, 0}, + {"CertInfo", Type, 11}, + {"CertOpenStore", Func, 0}, + {"CertOpenSystemStore", Func, 0}, + {"CertRevocationCrlInfo", Type, 11}, + {"CertRevocationInfo", Type, 0}, + {"CertRevocationInfo.CrlInfo", Field, 0}, + {"CertRevocationInfo.FreshnessTime", Field, 0}, + {"CertRevocationInfo.HasFreshnessTime", Field, 0}, + {"CertRevocationInfo.OidSpecificInfo", Field, 0}, + {"CertRevocationInfo.RevocationOid", Field, 0}, + {"CertRevocationInfo.RevocationResult", Field, 0}, + {"CertRevocationInfo.Size", Field, 0}, + {"CertSimpleChain", Type, 0}, + {"CertSimpleChain.Elements", Field, 0}, + {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.NumElements", Field, 0}, + {"CertSimpleChain.RevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.Size", Field, 0}, + {"CertSimpleChain.TrustListInfo", Field, 0}, + {"CertSimpleChain.TrustStatus", Field, 0}, + {"CertTrustListInfo", Type, 11}, + {"CertTrustStatus", Type, 0}, + {"CertTrustStatus.ErrorStatus", Field, 0}, + {"CertTrustStatus.InfoStatus", Field, 0}, + {"CertUsageMatch", Type, 0}, + {"CertUsageMatch.Type", Field, 0}, + {"CertUsageMatch.Usage", Field, 0}, + {"CertVerifyCertificateChainPolicy", Func, 0}, + {"Chdir", Func, 0}, + {"CheckBpfVersion", Func, 0}, + {"Chflags", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chroot", Func, 0}, + {"Clearenv", Func, 0}, + {"Close", Func, 0}, + {"CloseHandle", Func, 0}, + {"CloseOnExec", Func, 0}, + {"Closesocket", Func, 0}, + {"CmsgLen", Func, 0}, + {"CmsgSpace", Func, 0}, + {"Cmsghdr", Type, 0}, + {"Cmsghdr.Len", Field, 0}, + {"Cmsghdr.Level", Field, 0}, + {"Cmsghdr.Type", Field, 0}, + {"Cmsghdr.X__cmsg_data", Field, 0}, + {"CommandLineToArgv", Func, 0}, + {"ComputerName", Func, 0}, + {"Conn", Type, 9}, + {"Connect", Func, 0}, + {"ConnectEx", Func, 1}, + 
{"ConvertSidToStringSid", Func, 0}, + {"ConvertStringSidToSid", Func, 0}, + {"CopySid", Func, 0}, + {"Creat", Func, 0}, + {"CreateDirectory", Func, 0}, + {"CreateFile", Func, 0}, + {"CreateFileMapping", Func, 0}, + {"CreateHardLink", Func, 4}, + {"CreateIoCompletionPort", Func, 0}, + {"CreatePipe", Func, 0}, + {"CreateProcess", Func, 0}, + {"CreateProcessAsUser", Func, 10}, + {"CreateSymbolicLink", Func, 4}, + {"CreateToolhelp32Snapshot", Func, 4}, + {"Credential", Type, 0}, + {"Credential.Gid", Field, 0}, + {"Credential.Groups", Field, 0}, + {"Credential.NoSetGroups", Field, 9}, + {"Credential.Uid", Field, 0}, + {"CryptAcquireContext", Func, 0}, + {"CryptGenRandom", Func, 0}, + {"CryptReleaseContext", Func, 0}, + {"DIOCBSFLUSH", Const, 1}, + {"DIOCOSFPFLUSH", Const, 1}, + {"DLL", Type, 0}, + {"DLL.Handle", Field, 0}, + {"DLL.Name", Field, 0}, + {"DLLError", Type, 0}, + {"DLLError.Err", Field, 0}, + {"DLLError.Msg", Field, 0}, + {"DLLError.ObjName", Field, 0}, + {"DLT_A429", Const, 0}, + {"DLT_A653_ICM", Const, 0}, + {"DLT_AIRONET_HEADER", Const, 0}, + {"DLT_AOS", Const, 1}, + {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0}, + {"DLT_ARCNET", Const, 0}, + {"DLT_ARCNET_LINUX", Const, 0}, + {"DLT_ATM_CLIP", Const, 0}, + {"DLT_ATM_RFC1483", Const, 0}, + {"DLT_AURORA", Const, 0}, + {"DLT_AX25", Const, 0}, + {"DLT_AX25_KISS", Const, 0}, + {"DLT_BACNET_MS_TP", Const, 0}, + {"DLT_BLUETOOTH_HCI_H4", Const, 0}, + {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0}, + {"DLT_CAN20B", Const, 0}, + {"DLT_CAN_SOCKETCAN", Const, 1}, + {"DLT_CHAOS", Const, 0}, + {"DLT_CHDLC", Const, 0}, + {"DLT_CISCO_IOS", Const, 0}, + {"DLT_C_HDLC", Const, 0}, + {"DLT_C_HDLC_WITH_DIR", Const, 0}, + {"DLT_DBUS", Const, 1}, + {"DLT_DECT", Const, 1}, + {"DLT_DOCSIS", Const, 0}, + {"DLT_DVB_CI", Const, 1}, + {"DLT_ECONET", Const, 0}, + {"DLT_EN10MB", Const, 0}, + {"DLT_EN3MB", Const, 0}, + {"DLT_ENC", Const, 0}, + {"DLT_ERF", Const, 0}, + {"DLT_ERF_ETH", Const, 0}, + {"DLT_ERF_POS", Const, 0}, + {"DLT_FC_2", Const, 1}, + {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1}, + {"DLT_FDDI", Const, 0}, + {"DLT_FLEXRAY", Const, 0}, + {"DLT_FRELAY", Const, 0}, + {"DLT_FRELAY_WITH_DIR", Const, 0}, + {"DLT_GCOM_SERIAL", Const, 0}, + {"DLT_GCOM_T1E1", Const, 0}, + {"DLT_GPF_F", Const, 0}, + {"DLT_GPF_T", Const, 0}, + {"DLT_GPRS_LLC", Const, 0}, + {"DLT_GSMTAP_ABIS", Const, 1}, + {"DLT_GSMTAP_UM", Const, 1}, + {"DLT_HDLC", Const, 1}, + {"DLT_HHDLC", Const, 0}, + {"DLT_HIPPI", Const, 1}, + {"DLT_IBM_SN", Const, 0}, + {"DLT_IBM_SP", Const, 0}, + {"DLT_IEEE802", Const, 0}, + {"DLT_IEEE802_11", Const, 0}, + {"DLT_IEEE802_11_RADIO", Const, 0}, + {"DLT_IEEE802_11_RADIO_AVS", Const, 0}, + {"DLT_IEEE802_15_4", Const, 0}, + {"DLT_IEEE802_15_4_LINUX", Const, 0}, + {"DLT_IEEE802_15_4_NOFCS", Const, 1}, + {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0}, + {"DLT_IPFILTER", Const, 0}, + {"DLT_IPMB", Const, 0}, + {"DLT_IPMB_LINUX", Const, 0}, + {"DLT_IPNET", Const, 1}, + {"DLT_IPOIB", Const, 1}, + {"DLT_IPV4", Const, 1}, + {"DLT_IPV6", Const, 1}, + {"DLT_IP_OVER_FC", Const, 0}, + {"DLT_JUNIPER_ATM1", Const, 0}, + {"DLT_JUNIPER_ATM2", Const, 0}, + {"DLT_JUNIPER_ATM_CEMIC", Const, 1}, + {"DLT_JUNIPER_CHDLC", Const, 0}, + {"DLT_JUNIPER_ES", Const, 0}, + {"DLT_JUNIPER_ETHER", Const, 0}, + {"DLT_JUNIPER_FIBRECHANNEL", Const, 1}, + {"DLT_JUNIPER_FRELAY", Const, 0}, + {"DLT_JUNIPER_GGSN", Const, 0}, + {"DLT_JUNIPER_ISM", Const, 0}, + {"DLT_JUNIPER_MFR", Const, 0}, + {"DLT_JUNIPER_MLFR", Const, 0}, + 
{"DLT_JUNIPER_MLPPP", Const, 0}, + {"DLT_JUNIPER_MONITOR", Const, 0}, + {"DLT_JUNIPER_PIC_PEER", Const, 0}, + {"DLT_JUNIPER_PPP", Const, 0}, + {"DLT_JUNIPER_PPPOE", Const, 0}, + {"DLT_JUNIPER_PPPOE_ATM", Const, 0}, + {"DLT_JUNIPER_SERVICES", Const, 0}, + {"DLT_JUNIPER_SRX_E2E", Const, 1}, + {"DLT_JUNIPER_ST", Const, 0}, + {"DLT_JUNIPER_VP", Const, 0}, + {"DLT_JUNIPER_VS", Const, 1}, + {"DLT_LAPB_WITH_DIR", Const, 0}, + {"DLT_LAPD", Const, 0}, + {"DLT_LIN", Const, 0}, + {"DLT_LINUX_EVDEV", Const, 1}, + {"DLT_LINUX_IRDA", Const, 0}, + {"DLT_LINUX_LAPD", Const, 0}, + {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0}, + {"DLT_LINUX_SLL", Const, 0}, + {"DLT_LOOP", Const, 0}, + {"DLT_LTALK", Const, 0}, + {"DLT_MATCHING_MAX", Const, 1}, + {"DLT_MATCHING_MIN", Const, 1}, + {"DLT_MFR", Const, 0}, + {"DLT_MOST", Const, 0}, + {"DLT_MPEG_2_TS", Const, 1}, + {"DLT_MPLS", Const, 1}, + {"DLT_MTP2", Const, 0}, + {"DLT_MTP2_WITH_PHDR", Const, 0}, + {"DLT_MTP3", Const, 0}, + {"DLT_MUX27010", Const, 1}, + {"DLT_NETANALYZER", Const, 1}, + {"DLT_NETANALYZER_TRANSPARENT", Const, 1}, + {"DLT_NFC_LLCP", Const, 1}, + {"DLT_NFLOG", Const, 1}, + {"DLT_NG40", Const, 1}, + {"DLT_NULL", Const, 0}, + {"DLT_PCI_EXP", Const, 0}, + {"DLT_PFLOG", Const, 0}, + {"DLT_PFSYNC", Const, 0}, + {"DLT_PPI", Const, 0}, + {"DLT_PPP", Const, 0}, + {"DLT_PPP_BSDOS", Const, 0}, + {"DLT_PPP_ETHER", Const, 0}, + {"DLT_PPP_PPPD", Const, 0}, + {"DLT_PPP_SERIAL", Const, 0}, + {"DLT_PPP_WITH_DIR", Const, 0}, + {"DLT_PPP_WITH_DIRECTION", Const, 0}, + {"DLT_PRISM_HEADER", Const, 0}, + {"DLT_PRONET", Const, 0}, + {"DLT_RAIF1", Const, 0}, + {"DLT_RAW", Const, 0}, + {"DLT_RAWAF_MASK", Const, 1}, + {"DLT_RIO", Const, 0}, + {"DLT_SCCP", Const, 0}, + {"DLT_SITA", Const, 0}, + {"DLT_SLIP", Const, 0}, + {"DLT_SLIP_BSDOS", Const, 0}, + {"DLT_STANAG_5066_D_PDU", Const, 1}, + {"DLT_SUNATM", Const, 0}, + {"DLT_SYMANTEC_FIREWALL", Const, 0}, + {"DLT_TZSP", Const, 0}, + {"DLT_USB", Const, 0}, + {"DLT_USB_LINUX", Const, 0}, + {"DLT_USB_LINUX_MMAPPED", Const, 1}, + {"DLT_USER0", Const, 0}, + {"DLT_USER1", Const, 0}, + {"DLT_USER10", Const, 0}, + {"DLT_USER11", Const, 0}, + {"DLT_USER12", Const, 0}, + {"DLT_USER13", Const, 0}, + {"DLT_USER14", Const, 0}, + {"DLT_USER15", Const, 0}, + {"DLT_USER2", Const, 0}, + {"DLT_USER3", Const, 0}, + {"DLT_USER4", Const, 0}, + {"DLT_USER5", Const, 0}, + {"DLT_USER6", Const, 0}, + {"DLT_USER7", Const, 0}, + {"DLT_USER8", Const, 0}, + {"DLT_USER9", Const, 0}, + {"DLT_WIHART", Const, 1}, + {"DLT_X2E_SERIAL", Const, 0}, + {"DLT_X2E_XORAYA", Const, 0}, + {"DNSMXData", Type, 0}, + {"DNSMXData.NameExchange", Field, 0}, + {"DNSMXData.Pad", Field, 0}, + {"DNSMXData.Preference", Field, 0}, + {"DNSPTRData", Type, 0}, + {"DNSPTRData.Host", Field, 0}, + {"DNSRecord", Type, 0}, + {"DNSRecord.Data", Field, 0}, + {"DNSRecord.Dw", Field, 0}, + {"DNSRecord.Length", Field, 0}, + {"DNSRecord.Name", Field, 0}, + {"DNSRecord.Next", Field, 0}, + {"DNSRecord.Reserved", Field, 0}, + {"DNSRecord.Ttl", Field, 0}, + {"DNSRecord.Type", Field, 0}, + {"DNSSRVData", Type, 0}, + {"DNSSRVData.Pad", Field, 0}, + {"DNSSRVData.Port", Field, 0}, + {"DNSSRVData.Priority", Field, 0}, + {"DNSSRVData.Target", Field, 0}, + {"DNSSRVData.Weight", Field, 0}, + {"DNSTXTData", Type, 0}, + {"DNSTXTData.StringArray", Field, 0}, + {"DNSTXTData.StringCount", Field, 0}, + {"DNS_INFO_NO_RECORDS", Const, 4}, + {"DNS_TYPE_A", Const, 0}, + {"DNS_TYPE_A6", Const, 0}, + {"DNS_TYPE_AAAA", Const, 0}, + {"DNS_TYPE_ADDRS", Const, 0}, + {"DNS_TYPE_AFSDB", Const, 0}, + {"DNS_TYPE_ALL", Const, 0}, 
+ {"DNS_TYPE_ANY", Const, 0}, + {"DNS_TYPE_ATMA", Const, 0}, + {"DNS_TYPE_AXFR", Const, 0}, + {"DNS_TYPE_CERT", Const, 0}, + {"DNS_TYPE_CNAME", Const, 0}, + {"DNS_TYPE_DHCID", Const, 0}, + {"DNS_TYPE_DNAME", Const, 0}, + {"DNS_TYPE_DNSKEY", Const, 0}, + {"DNS_TYPE_DS", Const, 0}, + {"DNS_TYPE_EID", Const, 0}, + {"DNS_TYPE_GID", Const, 0}, + {"DNS_TYPE_GPOS", Const, 0}, + {"DNS_TYPE_HINFO", Const, 0}, + {"DNS_TYPE_ISDN", Const, 0}, + {"DNS_TYPE_IXFR", Const, 0}, + {"DNS_TYPE_KEY", Const, 0}, + {"DNS_TYPE_KX", Const, 0}, + {"DNS_TYPE_LOC", Const, 0}, + {"DNS_TYPE_MAILA", Const, 0}, + {"DNS_TYPE_MAILB", Const, 0}, + {"DNS_TYPE_MB", Const, 0}, + {"DNS_TYPE_MD", Const, 0}, + {"DNS_TYPE_MF", Const, 0}, + {"DNS_TYPE_MG", Const, 0}, + {"DNS_TYPE_MINFO", Const, 0}, + {"DNS_TYPE_MR", Const, 0}, + {"DNS_TYPE_MX", Const, 0}, + {"DNS_TYPE_NAPTR", Const, 0}, + {"DNS_TYPE_NBSTAT", Const, 0}, + {"DNS_TYPE_NIMLOC", Const, 0}, + {"DNS_TYPE_NS", Const, 0}, + {"DNS_TYPE_NSAP", Const, 0}, + {"DNS_TYPE_NSAPPTR", Const, 0}, + {"DNS_TYPE_NSEC", Const, 0}, + {"DNS_TYPE_NULL", Const, 0}, + {"DNS_TYPE_NXT", Const, 0}, + {"DNS_TYPE_OPT", Const, 0}, + {"DNS_TYPE_PTR", Const, 0}, + {"DNS_TYPE_PX", Const, 0}, + {"DNS_TYPE_RP", Const, 0}, + {"DNS_TYPE_RRSIG", Const, 0}, + {"DNS_TYPE_RT", Const, 0}, + {"DNS_TYPE_SIG", Const, 0}, + {"DNS_TYPE_SINK", Const, 0}, + {"DNS_TYPE_SOA", Const, 0}, + {"DNS_TYPE_SRV", Const, 0}, + {"DNS_TYPE_TEXT", Const, 0}, + {"DNS_TYPE_TKEY", Const, 0}, + {"DNS_TYPE_TSIG", Const, 0}, + {"DNS_TYPE_UID", Const, 0}, + {"DNS_TYPE_UINFO", Const, 0}, + {"DNS_TYPE_UNSPEC", Const, 0}, + {"DNS_TYPE_WINS", Const, 0}, + {"DNS_TYPE_WINSR", Const, 0}, + {"DNS_TYPE_WKS", Const, 0}, + {"DNS_TYPE_X25", Const, 0}, + {"DT_BLK", Const, 0}, + {"DT_CHR", Const, 0}, + {"DT_DIR", Const, 0}, + {"DT_FIFO", Const, 0}, + {"DT_LNK", Const, 0}, + {"DT_REG", Const, 0}, + {"DT_SOCK", Const, 0}, + {"DT_UNKNOWN", Const, 0}, + {"DT_WHT", Const, 0}, + {"DUPLICATE_CLOSE_SOURCE", Const, 0}, + {"DUPLICATE_SAME_ACCESS", Const, 0}, + {"DeleteFile", Func, 0}, + {"DetachLsf", Func, 0}, + {"DeviceIoControl", Func, 4}, + {"Dirent", Type, 0}, + {"Dirent.Fileno", Field, 0}, + {"Dirent.Ino", Field, 0}, + {"Dirent.Name", Field, 0}, + {"Dirent.Namlen", Field, 0}, + {"Dirent.Off", Field, 0}, + {"Dirent.Pad0", Field, 12}, + {"Dirent.Pad1", Field, 12}, + {"Dirent.Pad_cgo_0", Field, 0}, + {"Dirent.Reclen", Field, 0}, + {"Dirent.Seekoff", Field, 0}, + {"Dirent.Type", Field, 0}, + {"Dirent.X__d_padding", Field, 3}, + {"DnsNameCompare", Func, 4}, + {"DnsQuery", Func, 0}, + {"DnsRecordListFree", Func, 0}, + {"DnsSectionAdditional", Const, 4}, + {"DnsSectionAnswer", Const, 4}, + {"DnsSectionAuthority", Const, 4}, + {"DnsSectionQuestion", Const, 4}, + {"Dup", Func, 0}, + {"Dup2", Func, 0}, + {"Dup3", Func, 2}, + {"DuplicateHandle", Func, 0}, + {"E2BIG", Const, 0}, + {"EACCES", Const, 0}, + {"EADDRINUSE", Const, 0}, + {"EADDRNOTAVAIL", Const, 0}, + {"EADV", Const, 0}, + {"EAFNOSUPPORT", Const, 0}, + {"EAGAIN", Const, 0}, + {"EALREADY", Const, 0}, + {"EAUTH", Const, 0}, + {"EBADARCH", Const, 0}, + {"EBADE", Const, 0}, + {"EBADEXEC", Const, 0}, + {"EBADF", Const, 0}, + {"EBADFD", Const, 0}, + {"EBADMACHO", Const, 0}, + {"EBADMSG", Const, 0}, + {"EBADR", Const, 0}, + {"EBADRPC", Const, 0}, + {"EBADRQC", Const, 0}, + {"EBADSLT", Const, 0}, + {"EBFONT", Const, 0}, + {"EBUSY", Const, 0}, + {"ECANCELED", Const, 0}, + {"ECAPMODE", Const, 1}, + {"ECHILD", Const, 0}, + {"ECHO", Const, 0}, + {"ECHOCTL", Const, 0}, + {"ECHOE", Const, 0}, + {"ECHOK", Const, 0}, + 
{"ECHOKE", Const, 0}, + {"ECHONL", Const, 0}, + {"ECHOPRT", Const, 0}, + {"ECHRNG", Const, 0}, + {"ECOMM", Const, 0}, + {"ECONNABORTED", Const, 0}, + {"ECONNREFUSED", Const, 0}, + {"ECONNRESET", Const, 0}, + {"EDEADLK", Const, 0}, + {"EDEADLOCK", Const, 0}, + {"EDESTADDRREQ", Const, 0}, + {"EDEVERR", Const, 0}, + {"EDOM", Const, 0}, + {"EDOOFUS", Const, 0}, + {"EDOTDOT", Const, 0}, + {"EDQUOT", Const, 0}, + {"EEXIST", Const, 0}, + {"EFAULT", Const, 0}, + {"EFBIG", Const, 0}, + {"EFER_LMA", Const, 1}, + {"EFER_LME", Const, 1}, + {"EFER_NXE", Const, 1}, + {"EFER_SCE", Const, 1}, + {"EFTYPE", Const, 0}, + {"EHOSTDOWN", Const, 0}, + {"EHOSTUNREACH", Const, 0}, + {"EHWPOISON", Const, 0}, + {"EIDRM", Const, 0}, + {"EILSEQ", Const, 0}, + {"EINPROGRESS", Const, 0}, + {"EINTR", Const, 0}, + {"EINVAL", Const, 0}, + {"EIO", Const, 0}, + {"EIPSEC", Const, 1}, + {"EISCONN", Const, 0}, + {"EISDIR", Const, 0}, + {"EISNAM", Const, 0}, + {"EKEYEXPIRED", Const, 0}, + {"EKEYREJECTED", Const, 0}, + {"EKEYREVOKED", Const, 0}, + {"EL2HLT", Const, 0}, + {"EL2NSYNC", Const, 0}, + {"EL3HLT", Const, 0}, + {"EL3RST", Const, 0}, + {"ELAST", Const, 0}, + {"ELF_NGREG", Const, 0}, + {"ELF_PRARGSZ", Const, 0}, + {"ELIBACC", Const, 0}, + {"ELIBBAD", Const, 0}, + {"ELIBEXEC", Const, 0}, + {"ELIBMAX", Const, 0}, + {"ELIBSCN", Const, 0}, + {"ELNRNG", Const, 0}, + {"ELOOP", Const, 0}, + {"EMEDIUMTYPE", Const, 0}, + {"EMFILE", Const, 0}, + {"EMLINK", Const, 0}, + {"EMSGSIZE", Const, 0}, + {"EMT_TAGOVF", Const, 1}, + {"EMULTIHOP", Const, 0}, + {"EMUL_ENABLED", Const, 1}, + {"EMUL_LINUX", Const, 1}, + {"EMUL_LINUX32", Const, 1}, + {"EMUL_MAXID", Const, 1}, + {"EMUL_NATIVE", Const, 1}, + {"ENAMETOOLONG", Const, 0}, + {"ENAVAIL", Const, 0}, + {"ENDRUNDISC", Const, 1}, + {"ENEEDAUTH", Const, 0}, + {"ENETDOWN", Const, 0}, + {"ENETRESET", Const, 0}, + {"ENETUNREACH", Const, 0}, + {"ENFILE", Const, 0}, + {"ENOANO", Const, 0}, + {"ENOATTR", Const, 0}, + {"ENOBUFS", Const, 0}, + {"ENOCSI", Const, 0}, + {"ENODATA", Const, 0}, + {"ENODEV", Const, 0}, + {"ENOENT", Const, 0}, + {"ENOEXEC", Const, 0}, + {"ENOKEY", Const, 0}, + {"ENOLCK", Const, 0}, + {"ENOLINK", Const, 0}, + {"ENOMEDIUM", Const, 0}, + {"ENOMEM", Const, 0}, + {"ENOMSG", Const, 0}, + {"ENONET", Const, 0}, + {"ENOPKG", Const, 0}, + {"ENOPOLICY", Const, 0}, + {"ENOPROTOOPT", Const, 0}, + {"ENOSPC", Const, 0}, + {"ENOSR", Const, 0}, + {"ENOSTR", Const, 0}, + {"ENOSYS", Const, 0}, + {"ENOTBLK", Const, 0}, + {"ENOTCAPABLE", Const, 0}, + {"ENOTCONN", Const, 0}, + {"ENOTDIR", Const, 0}, + {"ENOTEMPTY", Const, 0}, + {"ENOTNAM", Const, 0}, + {"ENOTRECOVERABLE", Const, 0}, + {"ENOTSOCK", Const, 0}, + {"ENOTSUP", Const, 0}, + {"ENOTTY", Const, 0}, + {"ENOTUNIQ", Const, 0}, + {"ENXIO", Const, 0}, + {"EN_SW_CTL_INF", Const, 1}, + {"EN_SW_CTL_PREC", Const, 1}, + {"EN_SW_CTL_ROUND", Const, 1}, + {"EN_SW_DATACHAIN", Const, 1}, + {"EN_SW_DENORM", Const, 1}, + {"EN_SW_INVOP", Const, 1}, + {"EN_SW_OVERFLOW", Const, 1}, + {"EN_SW_PRECLOSS", Const, 1}, + {"EN_SW_UNDERFLOW", Const, 1}, + {"EN_SW_ZERODIV", Const, 1}, + {"EOPNOTSUPP", Const, 0}, + {"EOVERFLOW", Const, 0}, + {"EOWNERDEAD", Const, 0}, + {"EPERM", Const, 0}, + {"EPFNOSUPPORT", Const, 0}, + {"EPIPE", Const, 0}, + {"EPOLLERR", Const, 0}, + {"EPOLLET", Const, 0}, + {"EPOLLHUP", Const, 0}, + {"EPOLLIN", Const, 0}, + {"EPOLLMSG", Const, 0}, + {"EPOLLONESHOT", Const, 0}, + {"EPOLLOUT", Const, 0}, + {"EPOLLPRI", Const, 0}, + {"EPOLLRDBAND", Const, 0}, + {"EPOLLRDHUP", Const, 0}, + {"EPOLLRDNORM", Const, 0}, + {"EPOLLWRBAND", Const, 0}, + 
{"EPOLLWRNORM", Const, 0}, + {"EPOLL_CLOEXEC", Const, 0}, + {"EPOLL_CTL_ADD", Const, 0}, + {"EPOLL_CTL_DEL", Const, 0}, + {"EPOLL_CTL_MOD", Const, 0}, + {"EPOLL_NONBLOCK", Const, 0}, + {"EPROCLIM", Const, 0}, + {"EPROCUNAVAIL", Const, 0}, + {"EPROGMISMATCH", Const, 0}, + {"EPROGUNAVAIL", Const, 0}, + {"EPROTO", Const, 0}, + {"EPROTONOSUPPORT", Const, 0}, + {"EPROTOTYPE", Const, 0}, + {"EPWROFF", Const, 0}, + {"EQFULL", Const, 16}, + {"ERANGE", Const, 0}, + {"EREMCHG", Const, 0}, + {"EREMOTE", Const, 0}, + {"EREMOTEIO", Const, 0}, + {"ERESTART", Const, 0}, + {"ERFKILL", Const, 0}, + {"EROFS", Const, 0}, + {"ERPCMISMATCH", Const, 0}, + {"ERROR_ACCESS_DENIED", Const, 0}, + {"ERROR_ALREADY_EXISTS", Const, 0}, + {"ERROR_BROKEN_PIPE", Const, 0}, + {"ERROR_BUFFER_OVERFLOW", Const, 0}, + {"ERROR_DIR_NOT_EMPTY", Const, 8}, + {"ERROR_ENVVAR_NOT_FOUND", Const, 0}, + {"ERROR_FILE_EXISTS", Const, 0}, + {"ERROR_FILE_NOT_FOUND", Const, 0}, + {"ERROR_HANDLE_EOF", Const, 2}, + {"ERROR_INSUFFICIENT_BUFFER", Const, 0}, + {"ERROR_IO_PENDING", Const, 0}, + {"ERROR_MOD_NOT_FOUND", Const, 0}, + {"ERROR_MORE_DATA", Const, 3}, + {"ERROR_NETNAME_DELETED", Const, 3}, + {"ERROR_NOT_FOUND", Const, 1}, + {"ERROR_NO_MORE_FILES", Const, 0}, + {"ERROR_OPERATION_ABORTED", Const, 0}, + {"ERROR_PATH_NOT_FOUND", Const, 0}, + {"ERROR_PRIVILEGE_NOT_HELD", Const, 4}, + {"ERROR_PROC_NOT_FOUND", Const, 0}, + {"ESHLIBVERS", Const, 0}, + {"ESHUTDOWN", Const, 0}, + {"ESOCKTNOSUPPORT", Const, 0}, + {"ESPIPE", Const, 0}, + {"ESRCH", Const, 0}, + {"ESRMNT", Const, 0}, + {"ESTALE", Const, 0}, + {"ESTRPIPE", Const, 0}, + {"ETHERCAP_JUMBO_MTU", Const, 1}, + {"ETHERCAP_VLAN_HWTAGGING", Const, 1}, + {"ETHERCAP_VLAN_MTU", Const, 1}, + {"ETHERMIN", Const, 1}, + {"ETHERMTU", Const, 1}, + {"ETHERMTU_JUMBO", Const, 1}, + {"ETHERTYPE_8023", Const, 1}, + {"ETHERTYPE_AARP", Const, 1}, + {"ETHERTYPE_ACCTON", Const, 1}, + {"ETHERTYPE_AEONIC", Const, 1}, + {"ETHERTYPE_ALPHA", Const, 1}, + {"ETHERTYPE_AMBER", Const, 1}, + {"ETHERTYPE_AMOEBA", Const, 1}, + {"ETHERTYPE_AOE", Const, 1}, + {"ETHERTYPE_APOLLO", Const, 1}, + {"ETHERTYPE_APOLLODOMAIN", Const, 1}, + {"ETHERTYPE_APPLETALK", Const, 1}, + {"ETHERTYPE_APPLITEK", Const, 1}, + {"ETHERTYPE_ARGONAUT", Const, 1}, + {"ETHERTYPE_ARP", Const, 1}, + {"ETHERTYPE_AT", Const, 1}, + {"ETHERTYPE_ATALK", Const, 1}, + {"ETHERTYPE_ATOMIC", Const, 1}, + {"ETHERTYPE_ATT", Const, 1}, + {"ETHERTYPE_ATTSTANFORD", Const, 1}, + {"ETHERTYPE_AUTOPHON", Const, 1}, + {"ETHERTYPE_AXIS", Const, 1}, + {"ETHERTYPE_BCLOOP", Const, 1}, + {"ETHERTYPE_BOFL", Const, 1}, + {"ETHERTYPE_CABLETRON", Const, 1}, + {"ETHERTYPE_CHAOS", Const, 1}, + {"ETHERTYPE_COMDESIGN", Const, 1}, + {"ETHERTYPE_COMPUGRAPHIC", Const, 1}, + {"ETHERTYPE_COUNTERPOINT", Const, 1}, + {"ETHERTYPE_CRONUS", Const, 1}, + {"ETHERTYPE_CRONUSVLN", Const, 1}, + {"ETHERTYPE_DCA", Const, 1}, + {"ETHERTYPE_DDE", Const, 1}, + {"ETHERTYPE_DEBNI", Const, 1}, + {"ETHERTYPE_DECAM", Const, 1}, + {"ETHERTYPE_DECCUST", Const, 1}, + {"ETHERTYPE_DECDIAG", Const, 1}, + {"ETHERTYPE_DECDNS", Const, 1}, + {"ETHERTYPE_DECDTS", Const, 1}, + {"ETHERTYPE_DECEXPER", Const, 1}, + {"ETHERTYPE_DECLAST", Const, 1}, + {"ETHERTYPE_DECLTM", Const, 1}, + {"ETHERTYPE_DECMUMPS", Const, 1}, + {"ETHERTYPE_DECNETBIOS", Const, 1}, + {"ETHERTYPE_DELTACON", Const, 1}, + {"ETHERTYPE_DIDDLE", Const, 1}, + {"ETHERTYPE_DLOG1", Const, 1}, + {"ETHERTYPE_DLOG2", Const, 1}, + {"ETHERTYPE_DN", Const, 1}, + {"ETHERTYPE_DOGFIGHT", Const, 1}, + {"ETHERTYPE_DSMD", Const, 1}, + {"ETHERTYPE_ECMA", Const, 1}, + 
{"ETHERTYPE_ENCRYPT", Const, 1}, + {"ETHERTYPE_ES", Const, 1}, + {"ETHERTYPE_EXCELAN", Const, 1}, + {"ETHERTYPE_EXPERDATA", Const, 1}, + {"ETHERTYPE_FLIP", Const, 1}, + {"ETHERTYPE_FLOWCONTROL", Const, 1}, + {"ETHERTYPE_FRARP", Const, 1}, + {"ETHERTYPE_GENDYN", Const, 1}, + {"ETHERTYPE_HAYES", Const, 1}, + {"ETHERTYPE_HIPPI_FP", Const, 1}, + {"ETHERTYPE_HITACHI", Const, 1}, + {"ETHERTYPE_HP", Const, 1}, + {"ETHERTYPE_IEEEPUP", Const, 1}, + {"ETHERTYPE_IEEEPUPAT", Const, 1}, + {"ETHERTYPE_IMLBL", Const, 1}, + {"ETHERTYPE_IMLBLDIAG", Const, 1}, + {"ETHERTYPE_IP", Const, 1}, + {"ETHERTYPE_IPAS", Const, 1}, + {"ETHERTYPE_IPV6", Const, 1}, + {"ETHERTYPE_IPX", Const, 1}, + {"ETHERTYPE_IPXNEW", Const, 1}, + {"ETHERTYPE_KALPANA", Const, 1}, + {"ETHERTYPE_LANBRIDGE", Const, 1}, + {"ETHERTYPE_LANPROBE", Const, 1}, + {"ETHERTYPE_LAT", Const, 1}, + {"ETHERTYPE_LBACK", Const, 1}, + {"ETHERTYPE_LITTLE", Const, 1}, + {"ETHERTYPE_LLDP", Const, 1}, + {"ETHERTYPE_LOGICRAFT", Const, 1}, + {"ETHERTYPE_LOOPBACK", Const, 1}, + {"ETHERTYPE_MATRA", Const, 1}, + {"ETHERTYPE_MAX", Const, 1}, + {"ETHERTYPE_MERIT", Const, 1}, + {"ETHERTYPE_MICP", Const, 1}, + {"ETHERTYPE_MOPDL", Const, 1}, + {"ETHERTYPE_MOPRC", Const, 1}, + {"ETHERTYPE_MOTOROLA", Const, 1}, + {"ETHERTYPE_MPLS", Const, 1}, + {"ETHERTYPE_MPLS_MCAST", Const, 1}, + {"ETHERTYPE_MUMPS", Const, 1}, + {"ETHERTYPE_NBPCC", Const, 1}, + {"ETHERTYPE_NBPCLAIM", Const, 1}, + {"ETHERTYPE_NBPCLREQ", Const, 1}, + {"ETHERTYPE_NBPCLRSP", Const, 1}, + {"ETHERTYPE_NBPCREQ", Const, 1}, + {"ETHERTYPE_NBPCRSP", Const, 1}, + {"ETHERTYPE_NBPDG", Const, 1}, + {"ETHERTYPE_NBPDGB", Const, 1}, + {"ETHERTYPE_NBPDLTE", Const, 1}, + {"ETHERTYPE_NBPRAR", Const, 1}, + {"ETHERTYPE_NBPRAS", Const, 1}, + {"ETHERTYPE_NBPRST", Const, 1}, + {"ETHERTYPE_NBPSCD", Const, 1}, + {"ETHERTYPE_NBPVCD", Const, 1}, + {"ETHERTYPE_NBS", Const, 1}, + {"ETHERTYPE_NCD", Const, 1}, + {"ETHERTYPE_NESTAR", Const, 1}, + {"ETHERTYPE_NETBEUI", Const, 1}, + {"ETHERTYPE_NOVELL", Const, 1}, + {"ETHERTYPE_NS", Const, 1}, + {"ETHERTYPE_NSAT", Const, 1}, + {"ETHERTYPE_NSCOMPAT", Const, 1}, + {"ETHERTYPE_NTRAILER", Const, 1}, + {"ETHERTYPE_OS9", Const, 1}, + {"ETHERTYPE_OS9NET", Const, 1}, + {"ETHERTYPE_PACER", Const, 1}, + {"ETHERTYPE_PAE", Const, 1}, + {"ETHERTYPE_PCS", Const, 1}, + {"ETHERTYPE_PLANNING", Const, 1}, + {"ETHERTYPE_PPP", Const, 1}, + {"ETHERTYPE_PPPOE", Const, 1}, + {"ETHERTYPE_PPPOEDISC", Const, 1}, + {"ETHERTYPE_PRIMENTS", Const, 1}, + {"ETHERTYPE_PUP", Const, 1}, + {"ETHERTYPE_PUPAT", Const, 1}, + {"ETHERTYPE_QINQ", Const, 1}, + {"ETHERTYPE_RACAL", Const, 1}, + {"ETHERTYPE_RATIONAL", Const, 1}, + {"ETHERTYPE_RAWFR", Const, 1}, + {"ETHERTYPE_RCL", Const, 1}, + {"ETHERTYPE_RDP", Const, 1}, + {"ETHERTYPE_RETIX", Const, 1}, + {"ETHERTYPE_REVARP", Const, 1}, + {"ETHERTYPE_SCA", Const, 1}, + {"ETHERTYPE_SECTRA", Const, 1}, + {"ETHERTYPE_SECUREDATA", Const, 1}, + {"ETHERTYPE_SGITW", Const, 1}, + {"ETHERTYPE_SG_BOUNCE", Const, 1}, + {"ETHERTYPE_SG_DIAG", Const, 1}, + {"ETHERTYPE_SG_NETGAMES", Const, 1}, + {"ETHERTYPE_SG_RESV", Const, 1}, + {"ETHERTYPE_SIMNET", Const, 1}, + {"ETHERTYPE_SLOW", Const, 1}, + {"ETHERTYPE_SLOWPROTOCOLS", Const, 1}, + {"ETHERTYPE_SNA", Const, 1}, + {"ETHERTYPE_SNMP", Const, 1}, + {"ETHERTYPE_SONIX", Const, 1}, + {"ETHERTYPE_SPIDER", Const, 1}, + {"ETHERTYPE_SPRITE", Const, 1}, + {"ETHERTYPE_STP", Const, 1}, + {"ETHERTYPE_TALARIS", Const, 1}, + {"ETHERTYPE_TALARISMC", Const, 1}, + {"ETHERTYPE_TCPCOMP", Const, 1}, + {"ETHERTYPE_TCPSM", Const, 1}, + {"ETHERTYPE_TEC", Const, 1}, + 
{"ETHERTYPE_TIGAN", Const, 1}, + {"ETHERTYPE_TRAIL", Const, 1}, + {"ETHERTYPE_TRANSETHER", Const, 1}, + {"ETHERTYPE_TYMSHARE", Const, 1}, + {"ETHERTYPE_UBBST", Const, 1}, + {"ETHERTYPE_UBDEBUG", Const, 1}, + {"ETHERTYPE_UBDIAGLOOP", Const, 1}, + {"ETHERTYPE_UBDL", Const, 1}, + {"ETHERTYPE_UBNIU", Const, 1}, + {"ETHERTYPE_UBNMC", Const, 1}, + {"ETHERTYPE_VALID", Const, 1}, + {"ETHERTYPE_VARIAN", Const, 1}, + {"ETHERTYPE_VAXELN", Const, 1}, + {"ETHERTYPE_VEECO", Const, 1}, + {"ETHERTYPE_VEXP", Const, 1}, + {"ETHERTYPE_VGLAB", Const, 1}, + {"ETHERTYPE_VINES", Const, 1}, + {"ETHERTYPE_VINESECHO", Const, 1}, + {"ETHERTYPE_VINESLOOP", Const, 1}, + {"ETHERTYPE_VITAL", Const, 1}, + {"ETHERTYPE_VLAN", Const, 1}, + {"ETHERTYPE_VLTLMAN", Const, 1}, + {"ETHERTYPE_VPROD", Const, 1}, + {"ETHERTYPE_VURESERVED", Const, 1}, + {"ETHERTYPE_WATERLOO", Const, 1}, + {"ETHERTYPE_WELLFLEET", Const, 1}, + {"ETHERTYPE_X25", Const, 1}, + {"ETHERTYPE_X75", Const, 1}, + {"ETHERTYPE_XNSSM", Const, 1}, + {"ETHERTYPE_XTP", Const, 1}, + {"ETHER_ADDR_LEN", Const, 1}, + {"ETHER_ALIGN", Const, 1}, + {"ETHER_CRC_LEN", Const, 1}, + {"ETHER_CRC_POLY_BE", Const, 1}, + {"ETHER_CRC_POLY_LE", Const, 1}, + {"ETHER_HDR_LEN", Const, 1}, + {"ETHER_MAX_DIX_LEN", Const, 1}, + {"ETHER_MAX_LEN", Const, 1}, + {"ETHER_MAX_LEN_JUMBO", Const, 1}, + {"ETHER_MIN_LEN", Const, 1}, + {"ETHER_PPPOE_ENCAP_LEN", Const, 1}, + {"ETHER_TYPE_LEN", Const, 1}, + {"ETHER_VLAN_ENCAP_LEN", Const, 1}, + {"ETH_P_1588", Const, 0}, + {"ETH_P_8021Q", Const, 0}, + {"ETH_P_802_2", Const, 0}, + {"ETH_P_802_3", Const, 0}, + {"ETH_P_AARP", Const, 0}, + {"ETH_P_ALL", Const, 0}, + {"ETH_P_AOE", Const, 0}, + {"ETH_P_ARCNET", Const, 0}, + {"ETH_P_ARP", Const, 0}, + {"ETH_P_ATALK", Const, 0}, + {"ETH_P_ATMFATE", Const, 0}, + {"ETH_P_ATMMPOA", Const, 0}, + {"ETH_P_AX25", Const, 0}, + {"ETH_P_BPQ", Const, 0}, + {"ETH_P_CAIF", Const, 0}, + {"ETH_P_CAN", Const, 0}, + {"ETH_P_CONTROL", Const, 0}, + {"ETH_P_CUST", Const, 0}, + {"ETH_P_DDCMP", Const, 0}, + {"ETH_P_DEC", Const, 0}, + {"ETH_P_DIAG", Const, 0}, + {"ETH_P_DNA_DL", Const, 0}, + {"ETH_P_DNA_RC", Const, 0}, + {"ETH_P_DNA_RT", Const, 0}, + {"ETH_P_DSA", Const, 0}, + {"ETH_P_ECONET", Const, 0}, + {"ETH_P_EDSA", Const, 0}, + {"ETH_P_FCOE", Const, 0}, + {"ETH_P_FIP", Const, 0}, + {"ETH_P_HDLC", Const, 0}, + {"ETH_P_IEEE802154", Const, 0}, + {"ETH_P_IEEEPUP", Const, 0}, + {"ETH_P_IEEEPUPAT", Const, 0}, + {"ETH_P_IP", Const, 0}, + {"ETH_P_IPV6", Const, 0}, + {"ETH_P_IPX", Const, 0}, + {"ETH_P_IRDA", Const, 0}, + {"ETH_P_LAT", Const, 0}, + {"ETH_P_LINK_CTL", Const, 0}, + {"ETH_P_LOCALTALK", Const, 0}, + {"ETH_P_LOOP", Const, 0}, + {"ETH_P_MOBITEX", Const, 0}, + {"ETH_P_MPLS_MC", Const, 0}, + {"ETH_P_MPLS_UC", Const, 0}, + {"ETH_P_PAE", Const, 0}, + {"ETH_P_PAUSE", Const, 0}, + {"ETH_P_PHONET", Const, 0}, + {"ETH_P_PPPTALK", Const, 0}, + {"ETH_P_PPP_DISC", Const, 0}, + {"ETH_P_PPP_MP", Const, 0}, + {"ETH_P_PPP_SES", Const, 0}, + {"ETH_P_PUP", Const, 0}, + {"ETH_P_PUPAT", Const, 0}, + {"ETH_P_RARP", Const, 0}, + {"ETH_P_SCA", Const, 0}, + {"ETH_P_SLOW", Const, 0}, + {"ETH_P_SNAP", Const, 0}, + {"ETH_P_TEB", Const, 0}, + {"ETH_P_TIPC", Const, 0}, + {"ETH_P_TRAILER", Const, 0}, + {"ETH_P_TR_802_2", Const, 0}, + {"ETH_P_WAN_PPP", Const, 0}, + {"ETH_P_WCCP", Const, 0}, + {"ETH_P_X25", Const, 0}, + {"ETIME", Const, 0}, + {"ETIMEDOUT", Const, 0}, + {"ETOOMANYREFS", Const, 0}, + {"ETXTBSY", Const, 0}, + {"EUCLEAN", Const, 0}, + {"EUNATCH", Const, 0}, + {"EUSERS", Const, 0}, + {"EVFILT_AIO", Const, 0}, + {"EVFILT_FS", Const, 0}, + 
{"EVFILT_LIO", Const, 0}, + {"EVFILT_MACHPORT", Const, 0}, + {"EVFILT_PROC", Const, 0}, + {"EVFILT_READ", Const, 0}, + {"EVFILT_SIGNAL", Const, 0}, + {"EVFILT_SYSCOUNT", Const, 0}, + {"EVFILT_THREADMARKER", Const, 0}, + {"EVFILT_TIMER", Const, 0}, + {"EVFILT_USER", Const, 0}, + {"EVFILT_VM", Const, 0}, + {"EVFILT_VNODE", Const, 0}, + {"EVFILT_WRITE", Const, 0}, + {"EV_ADD", Const, 0}, + {"EV_CLEAR", Const, 0}, + {"EV_DELETE", Const, 0}, + {"EV_DISABLE", Const, 0}, + {"EV_DISPATCH", Const, 0}, + {"EV_DROP", Const, 3}, + {"EV_ENABLE", Const, 0}, + {"EV_EOF", Const, 0}, + {"EV_ERROR", Const, 0}, + {"EV_FLAG0", Const, 0}, + {"EV_FLAG1", Const, 0}, + {"EV_ONESHOT", Const, 0}, + {"EV_OOBAND", Const, 0}, + {"EV_POLL", Const, 0}, + {"EV_RECEIPT", Const, 0}, + {"EV_SYSFLAGS", Const, 0}, + {"EWINDOWS", Const, 0}, + {"EWOULDBLOCK", Const, 0}, + {"EXDEV", Const, 0}, + {"EXFULL", Const, 0}, + {"EXTA", Const, 0}, + {"EXTB", Const, 0}, + {"EXTPROC", Const, 0}, + {"Environ", Func, 0}, + {"EpollCreate", Func, 0}, + {"EpollCreate1", Func, 0}, + {"EpollCtl", Func, 0}, + {"EpollEvent", Type, 0}, + {"EpollEvent.Events", Field, 0}, + {"EpollEvent.Fd", Field, 0}, + {"EpollEvent.Pad", Field, 0}, + {"EpollEvent.PadFd", Field, 0}, + {"EpollWait", Func, 0}, + {"Errno", Type, 0}, + {"EscapeArg", Func, 0}, + {"Exchangedata", Func, 0}, + {"Exec", Func, 0}, + {"Exit", Func, 0}, + {"ExitProcess", Func, 0}, + {"FD_CLOEXEC", Const, 0}, + {"FD_SETSIZE", Const, 0}, + {"FILE_ACTION_ADDED", Const, 0}, + {"FILE_ACTION_MODIFIED", Const, 0}, + {"FILE_ACTION_REMOVED", Const, 0}, + {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0}, + {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0}, + {"FILE_APPEND_DATA", Const, 0}, + {"FILE_ATTRIBUTE_ARCHIVE", Const, 0}, + {"FILE_ATTRIBUTE_DIRECTORY", Const, 0}, + {"FILE_ATTRIBUTE_HIDDEN", Const, 0}, + {"FILE_ATTRIBUTE_NORMAL", Const, 0}, + {"FILE_ATTRIBUTE_READONLY", Const, 0}, + {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4}, + {"FILE_ATTRIBUTE_SYSTEM", Const, 0}, + {"FILE_BEGIN", Const, 0}, + {"FILE_CURRENT", Const, 0}, + {"FILE_END", Const, 0}, + {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0}, + {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4}, + {"FILE_FLAG_OVERLAPPED", Const, 0}, + {"FILE_LIST_DIRECTORY", Const, 0}, + {"FILE_MAP_COPY", Const, 0}, + {"FILE_MAP_EXECUTE", Const, 0}, + {"FILE_MAP_READ", Const, 0}, + {"FILE_MAP_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0}, + {"FILE_NOTIFY_CHANGE_CREATION", Const, 0}, + {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_SIZE", Const, 0}, + {"FILE_SHARE_DELETE", Const, 0}, + {"FILE_SHARE_READ", Const, 0}, + {"FILE_SHARE_WRITE", Const, 0}, + {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2}, + {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2}, + {"FILE_TYPE_CHAR", Const, 0}, + {"FILE_TYPE_DISK", Const, 0}, + {"FILE_TYPE_PIPE", Const, 0}, + {"FILE_TYPE_REMOTE", Const, 0}, + {"FILE_TYPE_UNKNOWN", Const, 0}, + {"FILE_WRITE_ATTRIBUTES", Const, 0}, + {"FLUSHO", Const, 0}, + {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0}, + {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0}, + {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0}, + {"FORMAT_MESSAGE_FROM_STRING", Const, 0}, + {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0}, + {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0}, + {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0}, + {"FSCTL_GET_REPARSE_POINT", Const, 4}, + {"F_ADDFILESIGS", Const, 0}, + {"F_ADDSIGS", Const, 0}, + {"F_ALLOCATEALL", Const, 0}, 
+ {"F_ALLOCATECONTIG", Const, 0}, + {"F_CANCEL", Const, 0}, + {"F_CHKCLEAN", Const, 0}, + {"F_CLOSEM", Const, 1}, + {"F_DUP2FD", Const, 0}, + {"F_DUP2FD_CLOEXEC", Const, 1}, + {"F_DUPFD", Const, 0}, + {"F_DUPFD_CLOEXEC", Const, 0}, + {"F_EXLCK", Const, 0}, + {"F_FINDSIGS", Const, 16}, + {"F_FLUSH_DATA", Const, 0}, + {"F_FREEZE_FS", Const, 0}, + {"F_FSCTL", Const, 1}, + {"F_FSDIRMASK", Const, 1}, + {"F_FSIN", Const, 1}, + {"F_FSINOUT", Const, 1}, + {"F_FSOUT", Const, 1}, + {"F_FSPRIV", Const, 1}, + {"F_FSVOID", Const, 1}, + {"F_FULLFSYNC", Const, 0}, + {"F_GETCODEDIR", Const, 16}, + {"F_GETFD", Const, 0}, + {"F_GETFL", Const, 0}, + {"F_GETLEASE", Const, 0}, + {"F_GETLK", Const, 0}, + {"F_GETLK64", Const, 0}, + {"F_GETLKPID", Const, 0}, + {"F_GETNOSIGPIPE", Const, 0}, + {"F_GETOWN", Const, 0}, + {"F_GETOWN_EX", Const, 0}, + {"F_GETPATH", Const, 0}, + {"F_GETPATH_MTMINFO", Const, 0}, + {"F_GETPIPE_SZ", Const, 0}, + {"F_GETPROTECTIONCLASS", Const, 0}, + {"F_GETPROTECTIONLEVEL", Const, 16}, + {"F_GETSIG", Const, 0}, + {"F_GLOBAL_NOCACHE", Const, 0}, + {"F_LOCK", Const, 0}, + {"F_LOG2PHYS", Const, 0}, + {"F_LOG2PHYS_EXT", Const, 0}, + {"F_MARKDEPENDENCY", Const, 0}, + {"F_MAXFD", Const, 1}, + {"F_NOCACHE", Const, 0}, + {"F_NODIRECT", Const, 0}, + {"F_NOTIFY", Const, 0}, + {"F_OGETLK", Const, 0}, + {"F_OK", Const, 0}, + {"F_OSETLK", Const, 0}, + {"F_OSETLKW", Const, 0}, + {"F_PARAM_MASK", Const, 1}, + {"F_PARAM_MAX", Const, 1}, + {"F_PATHPKG_CHECK", Const, 0}, + {"F_PEOFPOSMODE", Const, 0}, + {"F_PREALLOCATE", Const, 0}, + {"F_RDADVISE", Const, 0}, + {"F_RDAHEAD", Const, 0}, + {"F_RDLCK", Const, 0}, + {"F_READAHEAD", Const, 0}, + {"F_READBOOTSTRAP", Const, 0}, + {"F_SETBACKINGSTORE", Const, 0}, + {"F_SETFD", Const, 0}, + {"F_SETFL", Const, 0}, + {"F_SETLEASE", Const, 0}, + {"F_SETLK", Const, 0}, + {"F_SETLK64", Const, 0}, + {"F_SETLKW", Const, 0}, + {"F_SETLKW64", Const, 0}, + {"F_SETLKWTIMEOUT", Const, 16}, + {"F_SETLK_REMOTE", Const, 0}, + {"F_SETNOSIGPIPE", Const, 0}, + {"F_SETOWN", Const, 0}, + {"F_SETOWN_EX", Const, 0}, + {"F_SETPIPE_SZ", Const, 0}, + {"F_SETPROTECTIONCLASS", Const, 0}, + {"F_SETSIG", Const, 0}, + {"F_SETSIZE", Const, 0}, + {"F_SHLCK", Const, 0}, + {"F_SINGLE_WRITER", Const, 16}, + {"F_TEST", Const, 0}, + {"F_THAW_FS", Const, 0}, + {"F_TLOCK", Const, 0}, + {"F_TRANSCODEKEY", Const, 16}, + {"F_ULOCK", Const, 0}, + {"F_UNLCK", Const, 0}, + {"F_UNLCKSYS", Const, 0}, + {"F_VOLPOSMODE", Const, 0}, + {"F_WRITEBOOTSTRAP", Const, 0}, + {"F_WRLCK", Const, 0}, + {"Faccessat", Func, 0}, + {"Fallocate", Func, 0}, + {"Fbootstraptransfer_t", Type, 0}, + {"Fbootstraptransfer_t.Buffer", Field, 0}, + {"Fbootstraptransfer_t.Length", Field, 0}, + {"Fbootstraptransfer_t.Offset", Field, 0}, + {"Fchdir", Func, 0}, + {"Fchflags", Func, 0}, + {"Fchmod", Func, 0}, + {"Fchmodat", Func, 0}, + {"Fchown", Func, 0}, + {"Fchownat", Func, 0}, + {"FcntlFlock", Func, 3}, + {"FdSet", Type, 0}, + {"FdSet.Bits", Field, 0}, + {"FdSet.X__fds_bits", Field, 0}, + {"Fdatasync", Func, 0}, + {"FileNotifyInformation", Type, 0}, + {"FileNotifyInformation.Action", Field, 0}, + {"FileNotifyInformation.FileName", Field, 0}, + {"FileNotifyInformation.FileNameLength", Field, 0}, + {"FileNotifyInformation.NextEntryOffset", Field, 0}, + {"Filetime", Type, 0}, + {"Filetime.HighDateTime", Field, 0}, + {"Filetime.LowDateTime", Field, 0}, + {"FindClose", Func, 0}, + {"FindFirstFile", Func, 0}, + {"FindNextFile", Func, 0}, + {"Flock", Func, 0}, + {"Flock_t", Type, 0}, + {"Flock_t.Len", Field, 0}, + {"Flock_t.Pad_cgo_0", Field, 0}, 
+ {"Flock_t.Pad_cgo_1", Field, 3}, + {"Flock_t.Pid", Field, 0}, + {"Flock_t.Start", Field, 0}, + {"Flock_t.Sysid", Field, 0}, + {"Flock_t.Type", Field, 0}, + {"Flock_t.Whence", Field, 0}, + {"FlushBpf", Func, 0}, + {"FlushFileBuffers", Func, 0}, + {"FlushViewOfFile", Func, 0}, + {"ForkExec", Func, 0}, + {"ForkLock", Var, 0}, + {"FormatMessage", Func, 0}, + {"Fpathconf", Func, 0}, + {"FreeAddrInfoW", Func, 1}, + {"FreeEnvironmentStrings", Func, 0}, + {"FreeLibrary", Func, 0}, + {"Fsid", Type, 0}, + {"Fsid.Val", Field, 0}, + {"Fsid.X__fsid_val", Field, 2}, + {"Fsid.X__val", Field, 0}, + {"Fstat", Func, 0}, + {"Fstatat", Func, 12}, + {"Fstatfs", Func, 0}, + {"Fstore_t", Type, 0}, + {"Fstore_t.Bytesalloc", Field, 0}, + {"Fstore_t.Flags", Field, 0}, + {"Fstore_t.Length", Field, 0}, + {"Fstore_t.Offset", Field, 0}, + {"Fstore_t.Posmode", Field, 0}, + {"Fsync", Func, 0}, + {"Ftruncate", Func, 0}, + {"FullPath", Func, 4}, + {"Futimes", Func, 0}, + {"Futimesat", Func, 0}, + {"GENERIC_ALL", Const, 0}, + {"GENERIC_EXECUTE", Const, 0}, + {"GENERIC_READ", Const, 0}, + {"GENERIC_WRITE", Const, 0}, + {"GUID", Type, 1}, + {"GUID.Data1", Field, 1}, + {"GUID.Data2", Field, 1}, + {"GUID.Data3", Field, 1}, + {"GUID.Data4", Field, 1}, + {"GetAcceptExSockaddrs", Func, 0}, + {"GetAdaptersInfo", Func, 0}, + {"GetAddrInfoW", Func, 1}, + {"GetCommandLine", Func, 0}, + {"GetComputerName", Func, 0}, + {"GetConsoleMode", Func, 1}, + {"GetCurrentDirectory", Func, 0}, + {"GetCurrentProcess", Func, 0}, + {"GetEnvironmentStrings", Func, 0}, + {"GetEnvironmentVariable", Func, 0}, + {"GetExitCodeProcess", Func, 0}, + {"GetFileAttributes", Func, 0}, + {"GetFileAttributesEx", Func, 0}, + {"GetFileExInfoStandard", Const, 0}, + {"GetFileExMaxInfoLevel", Const, 0}, + {"GetFileInformationByHandle", Func, 0}, + {"GetFileType", Func, 0}, + {"GetFullPathName", Func, 0}, + {"GetHostByName", Func, 0}, + {"GetIfEntry", Func, 0}, + {"GetLastError", Func, 0}, + {"GetLengthSid", Func, 0}, + {"GetLongPathName", Func, 0}, + {"GetProcAddress", Func, 0}, + {"GetProcessTimes", Func, 0}, + {"GetProtoByName", Func, 0}, + {"GetQueuedCompletionStatus", Func, 0}, + {"GetServByName", Func, 0}, + {"GetShortPathName", Func, 0}, + {"GetStartupInfo", Func, 0}, + {"GetStdHandle", Func, 0}, + {"GetSystemTimeAsFileTime", Func, 0}, + {"GetTempPath", Func, 0}, + {"GetTimeZoneInformation", Func, 0}, + {"GetTokenInformation", Func, 0}, + {"GetUserNameEx", Func, 0}, + {"GetUserProfileDirectory", Func, 0}, + {"GetVersion", Func, 0}, + {"Getcwd", Func, 0}, + {"Getdents", Func, 0}, + {"Getdirentries", Func, 0}, + {"Getdtablesize", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getfsstat", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpeername", Func, 0}, + {"Getpgid", Func, 0}, + {"Getpgrp", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getpriority", Func, 0}, + {"Getrlimit", Func, 0}, + {"Getrusage", Func, 0}, + {"Getsid", Func, 0}, + {"Getsockname", Func, 0}, + {"Getsockopt", Func, 1}, + {"GetsockoptByte", Func, 0}, + {"GetsockoptICMPv6Filter", Func, 2}, + {"GetsockoptIPMreq", Func, 0}, + {"GetsockoptIPMreqn", Func, 0}, + {"GetsockoptIPv6MTUInfo", Func, 2}, + {"GetsockoptIPv6Mreq", Func, 0}, + {"GetsockoptInet4Addr", Func, 0}, + {"GetsockoptInt", Func, 0}, + {"GetsockoptUcred", Func, 1}, + {"Gettid", Func, 0}, + {"Gettimeofday", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Getxattr", Func, 1}, + {"HANDLE_FLAG_INHERIT", Const, 0}, + 
{"HKEY_CLASSES_ROOT", Const, 0}, + {"HKEY_CURRENT_CONFIG", Const, 0}, + {"HKEY_CURRENT_USER", Const, 0}, + {"HKEY_DYN_DATA", Const, 0}, + {"HKEY_LOCAL_MACHINE", Const, 0}, + {"HKEY_PERFORMANCE_DATA", Const, 0}, + {"HKEY_USERS", Const, 0}, + {"HUPCL", Const, 0}, + {"Handle", Type, 0}, + {"Hostent", Type, 0}, + {"Hostent.AddrList", Field, 0}, + {"Hostent.AddrType", Field, 0}, + {"Hostent.Aliases", Field, 0}, + {"Hostent.Length", Field, 0}, + {"Hostent.Name", Field, 0}, + {"ICANON", Const, 0}, + {"ICMP6_FILTER", Const, 2}, + {"ICMPV6_FILTER", Const, 2}, + {"ICMPv6Filter", Type, 2}, + {"ICMPv6Filter.Data", Field, 2}, + {"ICMPv6Filter.Filt", Field, 2}, + {"ICRNL", Const, 0}, + {"IEXTEN", Const, 0}, + {"IFAN_ARRIVAL", Const, 1}, + {"IFAN_DEPARTURE", Const, 1}, + {"IFA_ADDRESS", Const, 0}, + {"IFA_ANYCAST", Const, 0}, + {"IFA_BROADCAST", Const, 0}, + {"IFA_CACHEINFO", Const, 0}, + {"IFA_F_DADFAILED", Const, 0}, + {"IFA_F_DEPRECATED", Const, 0}, + {"IFA_F_HOMEADDRESS", Const, 0}, + {"IFA_F_NODAD", Const, 0}, + {"IFA_F_OPTIMISTIC", Const, 0}, + {"IFA_F_PERMANENT", Const, 0}, + {"IFA_F_SECONDARY", Const, 0}, + {"IFA_F_TEMPORARY", Const, 0}, + {"IFA_F_TENTATIVE", Const, 0}, + {"IFA_LABEL", Const, 0}, + {"IFA_LOCAL", Const, 0}, + {"IFA_MAX", Const, 0}, + {"IFA_MULTICAST", Const, 0}, + {"IFA_ROUTE", Const, 1}, + {"IFA_UNSPEC", Const, 0}, + {"IFF_ALLMULTI", Const, 0}, + {"IFF_ALTPHYS", Const, 0}, + {"IFF_AUTOMEDIA", Const, 0}, + {"IFF_BROADCAST", Const, 0}, + {"IFF_CANTCHANGE", Const, 0}, + {"IFF_CANTCONFIG", Const, 1}, + {"IFF_DEBUG", Const, 0}, + {"IFF_DRV_OACTIVE", Const, 0}, + {"IFF_DRV_RUNNING", Const, 0}, + {"IFF_DYING", Const, 0}, + {"IFF_DYNAMIC", Const, 0}, + {"IFF_LINK0", Const, 0}, + {"IFF_LINK1", Const, 0}, + {"IFF_LINK2", Const, 0}, + {"IFF_LOOPBACK", Const, 0}, + {"IFF_MASTER", Const, 0}, + {"IFF_MONITOR", Const, 0}, + {"IFF_MULTICAST", Const, 0}, + {"IFF_NOARP", Const, 0}, + {"IFF_NOTRAILERS", Const, 0}, + {"IFF_NO_PI", Const, 0}, + {"IFF_OACTIVE", Const, 0}, + {"IFF_ONE_QUEUE", Const, 0}, + {"IFF_POINTOPOINT", Const, 0}, + {"IFF_POINTTOPOINT", Const, 0}, + {"IFF_PORTSEL", Const, 0}, + {"IFF_PPROMISC", Const, 0}, + {"IFF_PROMISC", Const, 0}, + {"IFF_RENAMING", Const, 0}, + {"IFF_RUNNING", Const, 0}, + {"IFF_SIMPLEX", Const, 0}, + {"IFF_SLAVE", Const, 0}, + {"IFF_SMART", Const, 0}, + {"IFF_STATICARP", Const, 0}, + {"IFF_TAP", Const, 0}, + {"IFF_TUN", Const, 0}, + {"IFF_TUN_EXCL", Const, 0}, + {"IFF_UP", Const, 0}, + {"IFF_VNET_HDR", Const, 0}, + {"IFLA_ADDRESS", Const, 0}, + {"IFLA_BROADCAST", Const, 0}, + {"IFLA_COST", Const, 0}, + {"IFLA_IFALIAS", Const, 0}, + {"IFLA_IFNAME", Const, 0}, + {"IFLA_LINK", Const, 0}, + {"IFLA_LINKINFO", Const, 0}, + {"IFLA_LINKMODE", Const, 0}, + {"IFLA_MAP", Const, 0}, + {"IFLA_MASTER", Const, 0}, + {"IFLA_MAX", Const, 0}, + {"IFLA_MTU", Const, 0}, + {"IFLA_NET_NS_PID", Const, 0}, + {"IFLA_OPERSTATE", Const, 0}, + {"IFLA_PRIORITY", Const, 0}, + {"IFLA_PROTINFO", Const, 0}, + {"IFLA_QDISC", Const, 0}, + {"IFLA_STATS", Const, 0}, + {"IFLA_TXQLEN", Const, 0}, + {"IFLA_UNSPEC", Const, 0}, + {"IFLA_WEIGHT", Const, 0}, + {"IFLA_WIRELESS", Const, 0}, + {"IFNAMSIZ", Const, 0}, + {"IFT_1822", Const, 0}, + {"IFT_A12MPPSWITCH", Const, 0}, + {"IFT_AAL2", Const, 0}, + {"IFT_AAL5", Const, 0}, + {"IFT_ADSL", Const, 0}, + {"IFT_AFLANE8023", Const, 0}, + {"IFT_AFLANE8025", Const, 0}, + {"IFT_ARAP", Const, 0}, + {"IFT_ARCNET", Const, 0}, + {"IFT_ARCNETPLUS", Const, 0}, + {"IFT_ASYNC", Const, 0}, + {"IFT_ATM", Const, 0}, + {"IFT_ATMDXI", Const, 0}, + {"IFT_ATMFUNI", Const, 
0}, + {"IFT_ATMIMA", Const, 0}, + {"IFT_ATMLOGICAL", Const, 0}, + {"IFT_ATMRADIO", Const, 0}, + {"IFT_ATMSUBINTERFACE", Const, 0}, + {"IFT_ATMVCIENDPT", Const, 0}, + {"IFT_ATMVIRTUAL", Const, 0}, + {"IFT_BGPPOLICYACCOUNTING", Const, 0}, + {"IFT_BLUETOOTH", Const, 1}, + {"IFT_BRIDGE", Const, 0}, + {"IFT_BSC", Const, 0}, + {"IFT_CARP", Const, 0}, + {"IFT_CCTEMUL", Const, 0}, + {"IFT_CELLULAR", Const, 0}, + {"IFT_CEPT", Const, 0}, + {"IFT_CES", Const, 0}, + {"IFT_CHANNEL", Const, 0}, + {"IFT_CNR", Const, 0}, + {"IFT_COFFEE", Const, 0}, + {"IFT_COMPOSITELINK", Const, 0}, + {"IFT_DCN", Const, 0}, + {"IFT_DIGITALPOWERLINE", Const, 0}, + {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0}, + {"IFT_DLSW", Const, 0}, + {"IFT_DOCSCABLEDOWNSTREAM", Const, 0}, + {"IFT_DOCSCABLEMACLAYER", Const, 0}, + {"IFT_DOCSCABLEUPSTREAM", Const, 0}, + {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1}, + {"IFT_DS0", Const, 0}, + {"IFT_DS0BUNDLE", Const, 0}, + {"IFT_DS1FDL", Const, 0}, + {"IFT_DS3", Const, 0}, + {"IFT_DTM", Const, 0}, + {"IFT_DUMMY", Const, 1}, + {"IFT_DVBASILN", Const, 0}, + {"IFT_DVBASIOUT", Const, 0}, + {"IFT_DVBRCCDOWNSTREAM", Const, 0}, + {"IFT_DVBRCCMACLAYER", Const, 0}, + {"IFT_DVBRCCUPSTREAM", Const, 0}, + {"IFT_ECONET", Const, 1}, + {"IFT_ENC", Const, 0}, + {"IFT_EON", Const, 0}, + {"IFT_EPLRS", Const, 0}, + {"IFT_ESCON", Const, 0}, + {"IFT_ETHER", Const, 0}, + {"IFT_FAITH", Const, 0}, + {"IFT_FAST", Const, 0}, + {"IFT_FASTETHER", Const, 0}, + {"IFT_FASTETHERFX", Const, 0}, + {"IFT_FDDI", Const, 0}, + {"IFT_FIBRECHANNEL", Const, 0}, + {"IFT_FRAMERELAYINTERCONNECT", Const, 0}, + {"IFT_FRAMERELAYMPI", Const, 0}, + {"IFT_FRDLCIENDPT", Const, 0}, + {"IFT_FRELAY", Const, 0}, + {"IFT_FRELAYDCE", Const, 0}, + {"IFT_FRF16MFRBUNDLE", Const, 0}, + {"IFT_FRFORWARD", Const, 0}, + {"IFT_G703AT2MB", Const, 0}, + {"IFT_G703AT64K", Const, 0}, + {"IFT_GIF", Const, 0}, + {"IFT_GIGABITETHERNET", Const, 0}, + {"IFT_GR303IDT", Const, 0}, + {"IFT_GR303RDT", Const, 0}, + {"IFT_H323GATEKEEPER", Const, 0}, + {"IFT_H323PROXY", Const, 0}, + {"IFT_HDH1822", Const, 0}, + {"IFT_HDLC", Const, 0}, + {"IFT_HDSL2", Const, 0}, + {"IFT_HIPERLAN2", Const, 0}, + {"IFT_HIPPI", Const, 0}, + {"IFT_HIPPIINTERFACE", Const, 0}, + {"IFT_HOSTPAD", Const, 0}, + {"IFT_HSSI", Const, 0}, + {"IFT_HY", Const, 0}, + {"IFT_IBM370PARCHAN", Const, 0}, + {"IFT_IDSL", Const, 0}, + {"IFT_IEEE1394", Const, 0}, + {"IFT_IEEE80211", Const, 0}, + {"IFT_IEEE80212", Const, 0}, + {"IFT_IEEE8023ADLAG", Const, 0}, + {"IFT_IFGSN", Const, 0}, + {"IFT_IMT", Const, 0}, + {"IFT_INFINIBAND", Const, 1}, + {"IFT_INTERLEAVE", Const, 0}, + {"IFT_IP", Const, 0}, + {"IFT_IPFORWARD", Const, 0}, + {"IFT_IPOVERATM", Const, 0}, + {"IFT_IPOVERCDLC", Const, 0}, + {"IFT_IPOVERCLAW", Const, 0}, + {"IFT_IPSWITCH", Const, 0}, + {"IFT_IPXIP", Const, 0}, + {"IFT_ISDN", Const, 0}, + {"IFT_ISDNBASIC", Const, 0}, + {"IFT_ISDNPRIMARY", Const, 0}, + {"IFT_ISDNS", Const, 0}, + {"IFT_ISDNU", Const, 0}, + {"IFT_ISO88022LLC", Const, 0}, + {"IFT_ISO88023", Const, 0}, + {"IFT_ISO88024", Const, 0}, + {"IFT_ISO88025", Const, 0}, + {"IFT_ISO88025CRFPINT", Const, 0}, + {"IFT_ISO88025DTR", Const, 0}, + {"IFT_ISO88025FIBER", Const, 0}, + {"IFT_ISO88026", Const, 0}, + {"IFT_ISUP", Const, 0}, + {"IFT_L2VLAN", Const, 0}, + {"IFT_L3IPVLAN", Const, 0}, + {"IFT_L3IPXVLAN", Const, 0}, + {"IFT_LAPB", Const, 0}, + {"IFT_LAPD", Const, 0}, + {"IFT_LAPF", Const, 0}, + {"IFT_LINEGROUP", Const, 1}, + {"IFT_LOCALTALK", Const, 0}, + {"IFT_LOOP", Const, 0}, + {"IFT_MEDIAMAILOVERIP", Const, 0}, + {"IFT_MFSIGLINK", Const, 0}, + 
{"IFT_MIOX25", Const, 0}, + {"IFT_MODEM", Const, 0}, + {"IFT_MPC", Const, 0}, + {"IFT_MPLS", Const, 0}, + {"IFT_MPLSTUNNEL", Const, 0}, + {"IFT_MSDSL", Const, 0}, + {"IFT_MVL", Const, 0}, + {"IFT_MYRINET", Const, 0}, + {"IFT_NFAS", Const, 0}, + {"IFT_NSIP", Const, 0}, + {"IFT_OPTICALCHANNEL", Const, 0}, + {"IFT_OPTICALTRANSPORT", Const, 0}, + {"IFT_OTHER", Const, 0}, + {"IFT_P10", Const, 0}, + {"IFT_P80", Const, 0}, + {"IFT_PARA", Const, 0}, + {"IFT_PDP", Const, 0}, + {"IFT_PFLOG", Const, 0}, + {"IFT_PFLOW", Const, 1}, + {"IFT_PFSYNC", Const, 0}, + {"IFT_PLC", Const, 0}, + {"IFT_PON155", Const, 1}, + {"IFT_PON622", Const, 1}, + {"IFT_POS", Const, 0}, + {"IFT_PPP", Const, 0}, + {"IFT_PPPMULTILINKBUNDLE", Const, 0}, + {"IFT_PROPATM", Const, 1}, + {"IFT_PROPBWAP2MP", Const, 0}, + {"IFT_PROPCNLS", Const, 0}, + {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0}, + {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0}, + {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0}, + {"IFT_PROPMUX", Const, 0}, + {"IFT_PROPVIRTUAL", Const, 0}, + {"IFT_PROPWIRELESSP2P", Const, 0}, + {"IFT_PTPSERIAL", Const, 0}, + {"IFT_PVC", Const, 0}, + {"IFT_Q2931", Const, 1}, + {"IFT_QLLC", Const, 0}, + {"IFT_RADIOMAC", Const, 0}, + {"IFT_RADSL", Const, 0}, + {"IFT_REACHDSL", Const, 0}, + {"IFT_RFC1483", Const, 0}, + {"IFT_RS232", Const, 0}, + {"IFT_RSRB", Const, 0}, + {"IFT_SDLC", Const, 0}, + {"IFT_SDSL", Const, 0}, + {"IFT_SHDSL", Const, 0}, + {"IFT_SIP", Const, 0}, + {"IFT_SIPSIG", Const, 1}, + {"IFT_SIPTG", Const, 1}, + {"IFT_SLIP", Const, 0}, + {"IFT_SMDSDXI", Const, 0}, + {"IFT_SMDSICIP", Const, 0}, + {"IFT_SONET", Const, 0}, + {"IFT_SONETOVERHEADCHANNEL", Const, 0}, + {"IFT_SONETPATH", Const, 0}, + {"IFT_SONETVT", Const, 0}, + {"IFT_SRP", Const, 0}, + {"IFT_SS7SIGLINK", Const, 0}, + {"IFT_STACKTOSTACK", Const, 0}, + {"IFT_STARLAN", Const, 0}, + {"IFT_STF", Const, 0}, + {"IFT_T1", Const, 0}, + {"IFT_TDLC", Const, 0}, + {"IFT_TELINK", Const, 1}, + {"IFT_TERMPAD", Const, 0}, + {"IFT_TR008", Const, 0}, + {"IFT_TRANSPHDLC", Const, 0}, + {"IFT_TUNNEL", Const, 0}, + {"IFT_ULTRA", Const, 0}, + {"IFT_USB", Const, 0}, + {"IFT_V11", Const, 0}, + {"IFT_V35", Const, 0}, + {"IFT_V36", Const, 0}, + {"IFT_V37", Const, 0}, + {"IFT_VDSL", Const, 0}, + {"IFT_VIRTUALIPADDRESS", Const, 0}, + {"IFT_VIRTUALTG", Const, 1}, + {"IFT_VOICEDID", Const, 1}, + {"IFT_VOICEEM", Const, 0}, + {"IFT_VOICEEMFGD", Const, 1}, + {"IFT_VOICEENCAP", Const, 0}, + {"IFT_VOICEFGDEANA", Const, 1}, + {"IFT_VOICEFXO", Const, 0}, + {"IFT_VOICEFXS", Const, 0}, + {"IFT_VOICEOVERATM", Const, 0}, + {"IFT_VOICEOVERCABLE", Const, 1}, + {"IFT_VOICEOVERFRAMERELAY", Const, 0}, + {"IFT_VOICEOVERIP", Const, 0}, + {"IFT_X213", Const, 0}, + {"IFT_X25", Const, 0}, + {"IFT_X25DDN", Const, 0}, + {"IFT_X25HUNTGROUP", Const, 0}, + {"IFT_X25MLP", Const, 0}, + {"IFT_X25PLE", Const, 0}, + {"IFT_XETHER", Const, 0}, + {"IGNBRK", Const, 0}, + {"IGNCR", Const, 0}, + {"IGNORE", Const, 0}, + {"IGNPAR", Const, 0}, + {"IMAXBEL", Const, 0}, + {"INFINITE", Const, 0}, + {"INLCR", Const, 0}, + {"INPCK", Const, 0}, + {"INVALID_FILE_ATTRIBUTES", Const, 0}, + {"IN_ACCESS", Const, 0}, + {"IN_ALL_EVENTS", Const, 0}, + {"IN_ATTRIB", Const, 0}, + {"IN_CLASSA_HOST", Const, 0}, + {"IN_CLASSA_MAX", Const, 0}, + {"IN_CLASSA_NET", Const, 0}, + {"IN_CLASSA_NSHIFT", Const, 0}, + {"IN_CLASSB_HOST", Const, 0}, + {"IN_CLASSB_MAX", Const, 0}, + {"IN_CLASSB_NET", Const, 0}, + {"IN_CLASSB_NSHIFT", Const, 0}, + {"IN_CLASSC_HOST", Const, 0}, + {"IN_CLASSC_NET", Const, 0}, + {"IN_CLASSC_NSHIFT", Const, 0}, + {"IN_CLASSD_HOST", Const, 0}, + 
{"IN_CLASSD_NET", Const, 0}, + {"IN_CLASSD_NSHIFT", Const, 0}, + {"IN_CLOEXEC", Const, 0}, + {"IN_CLOSE", Const, 0}, + {"IN_CLOSE_NOWRITE", Const, 0}, + {"IN_CLOSE_WRITE", Const, 0}, + {"IN_CREATE", Const, 0}, + {"IN_DELETE", Const, 0}, + {"IN_DELETE_SELF", Const, 0}, + {"IN_DONT_FOLLOW", Const, 0}, + {"IN_EXCL_UNLINK", Const, 0}, + {"IN_IGNORED", Const, 0}, + {"IN_ISDIR", Const, 0}, + {"IN_LINKLOCALNETNUM", Const, 0}, + {"IN_LOOPBACKNET", Const, 0}, + {"IN_MASK_ADD", Const, 0}, + {"IN_MODIFY", Const, 0}, + {"IN_MOVE", Const, 0}, + {"IN_MOVED_FROM", Const, 0}, + {"IN_MOVED_TO", Const, 0}, + {"IN_MOVE_SELF", Const, 0}, + {"IN_NONBLOCK", Const, 0}, + {"IN_ONESHOT", Const, 0}, + {"IN_ONLYDIR", Const, 0}, + {"IN_OPEN", Const, 0}, + {"IN_Q_OVERFLOW", Const, 0}, + {"IN_RFC3021_HOST", Const, 1}, + {"IN_RFC3021_MASK", Const, 1}, + {"IN_RFC3021_NET", Const, 1}, + {"IN_RFC3021_NSHIFT", Const, 1}, + {"IN_UNMOUNT", Const, 0}, + {"IOC_IN", Const, 1}, + {"IOC_INOUT", Const, 1}, + {"IOC_OUT", Const, 1}, + {"IOC_VENDOR", Const, 3}, + {"IOC_WS2", Const, 1}, + {"IO_REPARSE_TAG_SYMLINK", Const, 4}, + {"IPMreq", Type, 0}, + {"IPMreq.Interface", Field, 0}, + {"IPMreq.Multiaddr", Field, 0}, + {"IPMreqn", Type, 0}, + {"IPMreqn.Address", Field, 0}, + {"IPMreqn.Ifindex", Field, 0}, + {"IPMreqn.Multiaddr", Field, 0}, + {"IPPROTO_3PC", Const, 0}, + {"IPPROTO_ADFS", Const, 0}, + {"IPPROTO_AH", Const, 0}, + {"IPPROTO_AHIP", Const, 0}, + {"IPPROTO_APES", Const, 0}, + {"IPPROTO_ARGUS", Const, 0}, + {"IPPROTO_AX25", Const, 0}, + {"IPPROTO_BHA", Const, 0}, + {"IPPROTO_BLT", Const, 0}, + {"IPPROTO_BRSATMON", Const, 0}, + {"IPPROTO_CARP", Const, 0}, + {"IPPROTO_CFTP", Const, 0}, + {"IPPROTO_CHAOS", Const, 0}, + {"IPPROTO_CMTP", Const, 0}, + {"IPPROTO_COMP", Const, 0}, + {"IPPROTO_CPHB", Const, 0}, + {"IPPROTO_CPNX", Const, 0}, + {"IPPROTO_DCCP", Const, 0}, + {"IPPROTO_DDP", Const, 0}, + {"IPPROTO_DGP", Const, 0}, + {"IPPROTO_DIVERT", Const, 0}, + {"IPPROTO_DIVERT_INIT", Const, 3}, + {"IPPROTO_DIVERT_RESP", Const, 3}, + {"IPPROTO_DONE", Const, 0}, + {"IPPROTO_DSTOPTS", Const, 0}, + {"IPPROTO_EGP", Const, 0}, + {"IPPROTO_EMCON", Const, 0}, + {"IPPROTO_ENCAP", Const, 0}, + {"IPPROTO_EON", Const, 0}, + {"IPPROTO_ESP", Const, 0}, + {"IPPROTO_ETHERIP", Const, 0}, + {"IPPROTO_FRAGMENT", Const, 0}, + {"IPPROTO_GGP", Const, 0}, + {"IPPROTO_GMTP", Const, 0}, + {"IPPROTO_GRE", Const, 0}, + {"IPPROTO_HELLO", Const, 0}, + {"IPPROTO_HMP", Const, 0}, + {"IPPROTO_HOPOPTS", Const, 0}, + {"IPPROTO_ICMP", Const, 0}, + {"IPPROTO_ICMPV6", Const, 0}, + {"IPPROTO_IDP", Const, 0}, + {"IPPROTO_IDPR", Const, 0}, + {"IPPROTO_IDRP", Const, 0}, + {"IPPROTO_IGMP", Const, 0}, + {"IPPROTO_IGP", Const, 0}, + {"IPPROTO_IGRP", Const, 0}, + {"IPPROTO_IL", Const, 0}, + {"IPPROTO_INLSP", Const, 0}, + {"IPPROTO_INP", Const, 0}, + {"IPPROTO_IP", Const, 0}, + {"IPPROTO_IPCOMP", Const, 0}, + {"IPPROTO_IPCV", Const, 0}, + {"IPPROTO_IPEIP", Const, 0}, + {"IPPROTO_IPIP", Const, 0}, + {"IPPROTO_IPPC", Const, 0}, + {"IPPROTO_IPV4", Const, 0}, + {"IPPROTO_IPV6", Const, 0}, + {"IPPROTO_IPV6_ICMP", Const, 1}, + {"IPPROTO_IRTP", Const, 0}, + {"IPPROTO_KRYPTOLAN", Const, 0}, + {"IPPROTO_LARP", Const, 0}, + {"IPPROTO_LEAF1", Const, 0}, + {"IPPROTO_LEAF2", Const, 0}, + {"IPPROTO_MAX", Const, 0}, + {"IPPROTO_MAXID", Const, 0}, + {"IPPROTO_MEAS", Const, 0}, + {"IPPROTO_MH", Const, 1}, + {"IPPROTO_MHRP", Const, 0}, + {"IPPROTO_MICP", Const, 0}, + {"IPPROTO_MOBILE", Const, 0}, + {"IPPROTO_MPLS", Const, 1}, + {"IPPROTO_MTP", Const, 0}, + {"IPPROTO_MUX", Const, 0}, + {"IPPROTO_ND", 
Const, 0}, + {"IPPROTO_NHRP", Const, 0}, + {"IPPROTO_NONE", Const, 0}, + {"IPPROTO_NSP", Const, 0}, + {"IPPROTO_NVPII", Const, 0}, + {"IPPROTO_OLD_DIVERT", Const, 0}, + {"IPPROTO_OSPFIGP", Const, 0}, + {"IPPROTO_PFSYNC", Const, 0}, + {"IPPROTO_PGM", Const, 0}, + {"IPPROTO_PIGP", Const, 0}, + {"IPPROTO_PIM", Const, 0}, + {"IPPROTO_PRM", Const, 0}, + {"IPPROTO_PUP", Const, 0}, + {"IPPROTO_PVP", Const, 0}, + {"IPPROTO_RAW", Const, 0}, + {"IPPROTO_RCCMON", Const, 0}, + {"IPPROTO_RDP", Const, 0}, + {"IPPROTO_ROUTING", Const, 0}, + {"IPPROTO_RSVP", Const, 0}, + {"IPPROTO_RVD", Const, 0}, + {"IPPROTO_SATEXPAK", Const, 0}, + {"IPPROTO_SATMON", Const, 0}, + {"IPPROTO_SCCSP", Const, 0}, + {"IPPROTO_SCTP", Const, 0}, + {"IPPROTO_SDRP", Const, 0}, + {"IPPROTO_SEND", Const, 1}, + {"IPPROTO_SEP", Const, 0}, + {"IPPROTO_SKIP", Const, 0}, + {"IPPROTO_SPACER", Const, 0}, + {"IPPROTO_SRPC", Const, 0}, + {"IPPROTO_ST", Const, 0}, + {"IPPROTO_SVMTP", Const, 0}, + {"IPPROTO_SWIPE", Const, 0}, + {"IPPROTO_TCF", Const, 0}, + {"IPPROTO_TCP", Const, 0}, + {"IPPROTO_TLSP", Const, 0}, + {"IPPROTO_TP", Const, 0}, + {"IPPROTO_TPXX", Const, 0}, + {"IPPROTO_TRUNK1", Const, 0}, + {"IPPROTO_TRUNK2", Const, 0}, + {"IPPROTO_TTP", Const, 0}, + {"IPPROTO_UDP", Const, 0}, + {"IPPROTO_UDPLITE", Const, 0}, + {"IPPROTO_VINES", Const, 0}, + {"IPPROTO_VISA", Const, 0}, + {"IPPROTO_VMTP", Const, 0}, + {"IPPROTO_VRRP", Const, 1}, + {"IPPROTO_WBEXPAK", Const, 0}, + {"IPPROTO_WBMON", Const, 0}, + {"IPPROTO_WSN", Const, 0}, + {"IPPROTO_XNET", Const, 0}, + {"IPPROTO_XTP", Const, 0}, + {"IPV6_2292DSTOPTS", Const, 0}, + {"IPV6_2292HOPLIMIT", Const, 0}, + {"IPV6_2292HOPOPTS", Const, 0}, + {"IPV6_2292NEXTHOP", Const, 0}, + {"IPV6_2292PKTINFO", Const, 0}, + {"IPV6_2292PKTOPTIONS", Const, 0}, + {"IPV6_2292RTHDR", Const, 0}, + {"IPV6_ADDRFORM", Const, 0}, + {"IPV6_ADD_MEMBERSHIP", Const, 0}, + {"IPV6_AUTHHDR", Const, 0}, + {"IPV6_AUTH_LEVEL", Const, 1}, + {"IPV6_AUTOFLOWLABEL", Const, 0}, + {"IPV6_BINDANY", Const, 0}, + {"IPV6_BINDV6ONLY", Const, 0}, + {"IPV6_BOUND_IF", Const, 0}, + {"IPV6_CHECKSUM", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IPV6_DEFHLIM", Const, 0}, + {"IPV6_DONTFRAG", Const, 0}, + {"IPV6_DROP_MEMBERSHIP", Const, 0}, + {"IPV6_DSTOPTS", Const, 0}, + {"IPV6_ESP_NETWORK_LEVEL", Const, 1}, + {"IPV6_ESP_TRANS_LEVEL", Const, 1}, + {"IPV6_FAITH", Const, 0}, + {"IPV6_FLOWINFO_MASK", Const, 0}, + {"IPV6_FLOWLABEL_MASK", Const, 0}, + {"IPV6_FRAGTTL", Const, 0}, + {"IPV6_FW_ADD", Const, 0}, + {"IPV6_FW_DEL", Const, 0}, + {"IPV6_FW_FLUSH", Const, 0}, + {"IPV6_FW_GET", Const, 0}, + {"IPV6_FW_ZERO", Const, 0}, + {"IPV6_HLIMDEC", Const, 0}, + {"IPV6_HOPLIMIT", Const, 0}, + {"IPV6_HOPOPTS", Const, 0}, + {"IPV6_IPCOMP_LEVEL", Const, 1}, + {"IPV6_IPSEC_POLICY", Const, 0}, + {"IPV6_JOIN_ANYCAST", Const, 0}, + {"IPV6_JOIN_GROUP", Const, 0}, + {"IPV6_LEAVE_ANYCAST", Const, 0}, + {"IPV6_LEAVE_GROUP", Const, 0}, + {"IPV6_MAXHLIM", Const, 0}, + {"IPV6_MAXOPTHDR", Const, 0}, + {"IPV6_MAXPACKET", Const, 0}, + {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IPV6_MAX_MEMBERSHIPS", Const, 0}, + {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IPV6_MIN_MEMBERSHIPS", Const, 0}, + {"IPV6_MMTU", Const, 0}, + {"IPV6_MSFILTER", Const, 0}, + {"IPV6_MTU", Const, 0}, + {"IPV6_MTU_DISCOVER", Const, 0}, + {"IPV6_MULTICAST_HOPS", Const, 0}, + {"IPV6_MULTICAST_IF", Const, 0}, + {"IPV6_MULTICAST_LOOP", Const, 0}, + {"IPV6_NEXTHOP", Const, 0}, + {"IPV6_OPTIONS", Const, 1}, + {"IPV6_PATHMTU", Const, 0}, + 
{"IPV6_PIPEX", Const, 1}, + {"IPV6_PKTINFO", Const, 0}, + {"IPV6_PMTUDISC_DO", Const, 0}, + {"IPV6_PMTUDISC_DONT", Const, 0}, + {"IPV6_PMTUDISC_PROBE", Const, 0}, + {"IPV6_PMTUDISC_WANT", Const, 0}, + {"IPV6_PORTRANGE", Const, 0}, + {"IPV6_PORTRANGE_DEFAULT", Const, 0}, + {"IPV6_PORTRANGE_HIGH", Const, 0}, + {"IPV6_PORTRANGE_LOW", Const, 0}, + {"IPV6_PREFER_TEMPADDR", Const, 0}, + {"IPV6_RECVDSTOPTS", Const, 0}, + {"IPV6_RECVDSTPORT", Const, 3}, + {"IPV6_RECVERR", Const, 0}, + {"IPV6_RECVHOPLIMIT", Const, 0}, + {"IPV6_RECVHOPOPTS", Const, 0}, + {"IPV6_RECVPATHMTU", Const, 0}, + {"IPV6_RECVPKTINFO", Const, 0}, + {"IPV6_RECVRTHDR", Const, 0}, + {"IPV6_RECVTCLASS", Const, 0}, + {"IPV6_ROUTER_ALERT", Const, 0}, + {"IPV6_RTABLE", Const, 1}, + {"IPV6_RTHDR", Const, 0}, + {"IPV6_RTHDRDSTOPTS", Const, 0}, + {"IPV6_RTHDR_LOOSE", Const, 0}, + {"IPV6_RTHDR_STRICT", Const, 0}, + {"IPV6_RTHDR_TYPE_0", Const, 0}, + {"IPV6_RXDSTOPTS", Const, 0}, + {"IPV6_RXHOPOPTS", Const, 0}, + {"IPV6_SOCKOPT_RESERVED1", Const, 0}, + {"IPV6_TCLASS", Const, 0}, + {"IPV6_UNICAST_HOPS", Const, 0}, + {"IPV6_USE_MIN_MTU", Const, 0}, + {"IPV6_V6ONLY", Const, 0}, + {"IPV6_VERSION", Const, 0}, + {"IPV6_VERSION_MASK", Const, 0}, + {"IPV6_XFRM_POLICY", Const, 0}, + {"IP_ADD_MEMBERSHIP", Const, 0}, + {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_AUTH_LEVEL", Const, 1}, + {"IP_BINDANY", Const, 0}, + {"IP_BLOCK_SOURCE", Const, 0}, + {"IP_BOUND_IF", Const, 0}, + {"IP_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IP_DEFAULT_MULTICAST_TTL", Const, 0}, + {"IP_DF", Const, 0}, + {"IP_DIVERTFL", Const, 3}, + {"IP_DONTFRAG", Const, 0}, + {"IP_DROP_MEMBERSHIP", Const, 0}, + {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_DUMMYNET3", Const, 0}, + {"IP_DUMMYNET_CONFIGURE", Const, 0}, + {"IP_DUMMYNET_DEL", Const, 0}, + {"IP_DUMMYNET_FLUSH", Const, 0}, + {"IP_DUMMYNET_GET", Const, 0}, + {"IP_EF", Const, 1}, + {"IP_ERRORMTU", Const, 1}, + {"IP_ESP_NETWORK_LEVEL", Const, 1}, + {"IP_ESP_TRANS_LEVEL", Const, 1}, + {"IP_FAITH", Const, 0}, + {"IP_FREEBIND", Const, 0}, + {"IP_FW3", Const, 0}, + {"IP_FW_ADD", Const, 0}, + {"IP_FW_DEL", Const, 0}, + {"IP_FW_FLUSH", Const, 0}, + {"IP_FW_GET", Const, 0}, + {"IP_FW_NAT_CFG", Const, 0}, + {"IP_FW_NAT_DEL", Const, 0}, + {"IP_FW_NAT_GET_CONFIG", Const, 0}, + {"IP_FW_NAT_GET_LOG", Const, 0}, + {"IP_FW_RESETLOG", Const, 0}, + {"IP_FW_TABLE_ADD", Const, 0}, + {"IP_FW_TABLE_DEL", Const, 0}, + {"IP_FW_TABLE_FLUSH", Const, 0}, + {"IP_FW_TABLE_GETSIZE", Const, 0}, + {"IP_FW_TABLE_LIST", Const, 0}, + {"IP_FW_ZERO", Const, 0}, + {"IP_HDRINCL", Const, 0}, + {"IP_IPCOMP_LEVEL", Const, 1}, + {"IP_IPSECFLOWINFO", Const, 1}, + {"IP_IPSEC_LOCAL_AUTH", Const, 1}, + {"IP_IPSEC_LOCAL_CRED", Const, 1}, + {"IP_IPSEC_LOCAL_ID", Const, 1}, + {"IP_IPSEC_POLICY", Const, 0}, + {"IP_IPSEC_REMOTE_AUTH", Const, 1}, + {"IP_IPSEC_REMOTE_CRED", Const, 1}, + {"IP_IPSEC_REMOTE_ID", Const, 1}, + {"IP_MAXPACKET", Const, 0}, + {"IP_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IP_MAX_MEMBERSHIPS", Const, 0}, + {"IP_MAX_SOCK_MUTE_FILTER", Const, 0}, + {"IP_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IP_MAX_SOURCE_FILTER", Const, 0}, + {"IP_MF", Const, 0}, + {"IP_MINFRAGSIZE", Const, 1}, + {"IP_MINTTL", Const, 0}, + {"IP_MIN_MEMBERSHIPS", Const, 0}, + {"IP_MSFILTER", Const, 0}, + {"IP_MSS", Const, 0}, + {"IP_MTU", Const, 0}, + {"IP_MTU_DISCOVER", Const, 0}, + {"IP_MULTICAST_IF", Const, 0}, + {"IP_MULTICAST_IFINDEX", Const, 0}, + {"IP_MULTICAST_LOOP", Const, 0}, + {"IP_MULTICAST_TTL", Const, 0}, + {"IP_MULTICAST_VIF", Const, 0}, + {"IP_NAT__XXX", Const, 0}, + 
{"IP_OFFMASK", Const, 0}, + {"IP_OLD_FW_ADD", Const, 0}, + {"IP_OLD_FW_DEL", Const, 0}, + {"IP_OLD_FW_FLUSH", Const, 0}, + {"IP_OLD_FW_GET", Const, 0}, + {"IP_OLD_FW_RESETLOG", Const, 0}, + {"IP_OLD_FW_ZERO", Const, 0}, + {"IP_ONESBCAST", Const, 0}, + {"IP_OPTIONS", Const, 0}, + {"IP_ORIGDSTADDR", Const, 0}, + {"IP_PASSSEC", Const, 0}, + {"IP_PIPEX", Const, 1}, + {"IP_PKTINFO", Const, 0}, + {"IP_PKTOPTIONS", Const, 0}, + {"IP_PMTUDISC", Const, 0}, + {"IP_PMTUDISC_DO", Const, 0}, + {"IP_PMTUDISC_DONT", Const, 0}, + {"IP_PMTUDISC_PROBE", Const, 0}, + {"IP_PMTUDISC_WANT", Const, 0}, + {"IP_PORTRANGE", Const, 0}, + {"IP_PORTRANGE_DEFAULT", Const, 0}, + {"IP_PORTRANGE_HIGH", Const, 0}, + {"IP_PORTRANGE_LOW", Const, 0}, + {"IP_RECVDSTADDR", Const, 0}, + {"IP_RECVDSTPORT", Const, 1}, + {"IP_RECVERR", Const, 0}, + {"IP_RECVIF", Const, 0}, + {"IP_RECVOPTS", Const, 0}, + {"IP_RECVORIGDSTADDR", Const, 0}, + {"IP_RECVPKTINFO", Const, 0}, + {"IP_RECVRETOPTS", Const, 0}, + {"IP_RECVRTABLE", Const, 1}, + {"IP_RECVTOS", Const, 0}, + {"IP_RECVTTL", Const, 0}, + {"IP_RETOPTS", Const, 0}, + {"IP_RF", Const, 0}, + {"IP_ROUTER_ALERT", Const, 0}, + {"IP_RSVP_OFF", Const, 0}, + {"IP_RSVP_ON", Const, 0}, + {"IP_RSVP_VIF_OFF", Const, 0}, + {"IP_RSVP_VIF_ON", Const, 0}, + {"IP_RTABLE", Const, 1}, + {"IP_SENDSRCADDR", Const, 0}, + {"IP_STRIPHDR", Const, 0}, + {"IP_TOS", Const, 0}, + {"IP_TRAFFIC_MGT_BACKGROUND", Const, 0}, + {"IP_TRANSPARENT", Const, 0}, + {"IP_TTL", Const, 0}, + {"IP_UNBLOCK_SOURCE", Const, 0}, + {"IP_XFRM_POLICY", Const, 0}, + {"IPv6MTUInfo", Type, 2}, + {"IPv6MTUInfo.Addr", Field, 2}, + {"IPv6MTUInfo.Mtu", Field, 2}, + {"IPv6Mreq", Type, 0}, + {"IPv6Mreq.Interface", Field, 0}, + {"IPv6Mreq.Multiaddr", Field, 0}, + {"ISIG", Const, 0}, + {"ISTRIP", Const, 0}, + {"IUCLC", Const, 0}, + {"IUTF8", Const, 0}, + {"IXANY", Const, 0}, + {"IXOFF", Const, 0}, + {"IXON", Const, 0}, + {"IfAddrmsg", Type, 0}, + {"IfAddrmsg.Family", Field, 0}, + {"IfAddrmsg.Flags", Field, 0}, + {"IfAddrmsg.Index", Field, 0}, + {"IfAddrmsg.Prefixlen", Field, 0}, + {"IfAddrmsg.Scope", Field, 0}, + {"IfAnnounceMsghdr", Type, 1}, + {"IfAnnounceMsghdr.Hdrlen", Field, 2}, + {"IfAnnounceMsghdr.Index", Field, 1}, + {"IfAnnounceMsghdr.Msglen", Field, 1}, + {"IfAnnounceMsghdr.Name", Field, 1}, + {"IfAnnounceMsghdr.Type", Field, 1}, + {"IfAnnounceMsghdr.Version", Field, 1}, + {"IfAnnounceMsghdr.What", Field, 1}, + {"IfData", Type, 0}, + {"IfData.Addrlen", Field, 0}, + {"IfData.Baudrate", Field, 0}, + {"IfData.Capabilities", Field, 2}, + {"IfData.Collisions", Field, 0}, + {"IfData.Datalen", Field, 0}, + {"IfData.Epoch", Field, 0}, + {"IfData.Hdrlen", Field, 0}, + {"IfData.Hwassist", Field, 0}, + {"IfData.Ibytes", Field, 0}, + {"IfData.Ierrors", Field, 0}, + {"IfData.Imcasts", Field, 0}, + {"IfData.Ipackets", Field, 0}, + {"IfData.Iqdrops", Field, 0}, + {"IfData.Lastchange", Field, 0}, + {"IfData.Link_state", Field, 0}, + {"IfData.Mclpool", Field, 2}, + {"IfData.Metric", Field, 0}, + {"IfData.Mtu", Field, 0}, + {"IfData.Noproto", Field, 0}, + {"IfData.Obytes", Field, 0}, + {"IfData.Oerrors", Field, 0}, + {"IfData.Omcasts", Field, 0}, + {"IfData.Opackets", Field, 0}, + {"IfData.Pad", Field, 2}, + {"IfData.Pad_cgo_0", Field, 2}, + {"IfData.Pad_cgo_1", Field, 2}, + {"IfData.Physical", Field, 0}, + {"IfData.Recvquota", Field, 0}, + {"IfData.Recvtiming", Field, 0}, + {"IfData.Reserved1", Field, 0}, + {"IfData.Reserved2", Field, 0}, + {"IfData.Spare_char1", Field, 0}, + {"IfData.Spare_char2", Field, 0}, + {"IfData.Type", Field, 0}, + 
{"IfData.Typelen", Field, 0}, + {"IfData.Unused1", Field, 0}, + {"IfData.Unused2", Field, 0}, + {"IfData.Xmitquota", Field, 0}, + {"IfData.Xmittiming", Field, 0}, + {"IfInfomsg", Type, 0}, + {"IfInfomsg.Change", Field, 0}, + {"IfInfomsg.Family", Field, 0}, + {"IfInfomsg.Flags", Field, 0}, + {"IfInfomsg.Index", Field, 0}, + {"IfInfomsg.Type", Field, 0}, + {"IfInfomsg.X__ifi_pad", Field, 0}, + {"IfMsghdr", Type, 0}, + {"IfMsghdr.Addrs", Field, 0}, + {"IfMsghdr.Data", Field, 0}, + {"IfMsghdr.Flags", Field, 0}, + {"IfMsghdr.Hdrlen", Field, 2}, + {"IfMsghdr.Index", Field, 0}, + {"IfMsghdr.Msglen", Field, 0}, + {"IfMsghdr.Pad1", Field, 2}, + {"IfMsghdr.Pad2", Field, 2}, + {"IfMsghdr.Pad_cgo_0", Field, 0}, + {"IfMsghdr.Pad_cgo_1", Field, 2}, + {"IfMsghdr.Tableid", Field, 2}, + {"IfMsghdr.Type", Field, 0}, + {"IfMsghdr.Version", Field, 0}, + {"IfMsghdr.Xflags", Field, 2}, + {"IfaMsghdr", Type, 0}, + {"IfaMsghdr.Addrs", Field, 0}, + {"IfaMsghdr.Flags", Field, 0}, + {"IfaMsghdr.Hdrlen", Field, 2}, + {"IfaMsghdr.Index", Field, 0}, + {"IfaMsghdr.Metric", Field, 0}, + {"IfaMsghdr.Msglen", Field, 0}, + {"IfaMsghdr.Pad1", Field, 2}, + {"IfaMsghdr.Pad2", Field, 2}, + {"IfaMsghdr.Pad_cgo_0", Field, 0}, + {"IfaMsghdr.Tableid", Field, 2}, + {"IfaMsghdr.Type", Field, 0}, + {"IfaMsghdr.Version", Field, 0}, + {"IfmaMsghdr", Type, 0}, + {"IfmaMsghdr.Addrs", Field, 0}, + {"IfmaMsghdr.Flags", Field, 0}, + {"IfmaMsghdr.Index", Field, 0}, + {"IfmaMsghdr.Msglen", Field, 0}, + {"IfmaMsghdr.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr.Type", Field, 0}, + {"IfmaMsghdr.Version", Field, 0}, + {"IfmaMsghdr2", Type, 0}, + {"IfmaMsghdr2.Addrs", Field, 0}, + {"IfmaMsghdr2.Flags", Field, 0}, + {"IfmaMsghdr2.Index", Field, 0}, + {"IfmaMsghdr2.Msglen", Field, 0}, + {"IfmaMsghdr2.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr2.Refcount", Field, 0}, + {"IfmaMsghdr2.Type", Field, 0}, + {"IfmaMsghdr2.Version", Field, 0}, + {"ImplementsGetwd", Const, 0}, + {"Inet4Pktinfo", Type, 0}, + {"Inet4Pktinfo.Addr", Field, 0}, + {"Inet4Pktinfo.Ifindex", Field, 0}, + {"Inet4Pktinfo.Spec_dst", Field, 0}, + {"Inet6Pktinfo", Type, 0}, + {"Inet6Pktinfo.Addr", Field, 0}, + {"Inet6Pktinfo.Ifindex", Field, 0}, + {"InotifyAddWatch", Func, 0}, + {"InotifyEvent", Type, 0}, + {"InotifyEvent.Cookie", Field, 0}, + {"InotifyEvent.Len", Field, 0}, + {"InotifyEvent.Mask", Field, 0}, + {"InotifyEvent.Name", Field, 0}, + {"InotifyEvent.Wd", Field, 0}, + {"InotifyInit", Func, 0}, + {"InotifyInit1", Func, 0}, + {"InotifyRmWatch", Func, 0}, + {"InterfaceAddrMessage", Type, 0}, + {"InterfaceAddrMessage.Data", Field, 0}, + {"InterfaceAddrMessage.Header", Field, 0}, + {"InterfaceAnnounceMessage", Type, 1}, + {"InterfaceAnnounceMessage.Header", Field, 1}, + {"InterfaceInfo", Type, 0}, + {"InterfaceInfo.Address", Field, 0}, + {"InterfaceInfo.BroadcastAddress", Field, 0}, + {"InterfaceInfo.Flags", Field, 0}, + {"InterfaceInfo.Netmask", Field, 0}, + {"InterfaceMessage", Type, 0}, + {"InterfaceMessage.Data", Field, 0}, + {"InterfaceMessage.Header", Field, 0}, + {"InterfaceMulticastAddrMessage", Type, 0}, + {"InterfaceMulticastAddrMessage.Data", Field, 0}, + {"InterfaceMulticastAddrMessage.Header", Field, 0}, + {"InvalidHandle", Const, 0}, + {"Ioperm", Func, 0}, + {"Iopl", Func, 0}, + {"Iovec", Type, 0}, + {"Iovec.Base", Field, 0}, + {"Iovec.Len", Field, 0}, + {"IpAdapterInfo", Type, 0}, + {"IpAdapterInfo.AdapterName", Field, 0}, + {"IpAdapterInfo.Address", Field, 0}, + {"IpAdapterInfo.AddressLength", Field, 0}, + {"IpAdapterInfo.ComboIndex", Field, 0}, + {"IpAdapterInfo.CurrentIpAddress", 
Field, 0}, + {"IpAdapterInfo.Description", Field, 0}, + {"IpAdapterInfo.DhcpEnabled", Field, 0}, + {"IpAdapterInfo.DhcpServer", Field, 0}, + {"IpAdapterInfo.GatewayList", Field, 0}, + {"IpAdapterInfo.HaveWins", Field, 0}, + {"IpAdapterInfo.Index", Field, 0}, + {"IpAdapterInfo.IpAddressList", Field, 0}, + {"IpAdapterInfo.LeaseExpires", Field, 0}, + {"IpAdapterInfo.LeaseObtained", Field, 0}, + {"IpAdapterInfo.Next", Field, 0}, + {"IpAdapterInfo.PrimaryWinsServer", Field, 0}, + {"IpAdapterInfo.SecondaryWinsServer", Field, 0}, + {"IpAdapterInfo.Type", Field, 0}, + {"IpAddrString", Type, 0}, + {"IpAddrString.Context", Field, 0}, + {"IpAddrString.IpAddress", Field, 0}, + {"IpAddrString.IpMask", Field, 0}, + {"IpAddrString.Next", Field, 0}, + {"IpAddressString", Type, 0}, + {"IpAddressString.String", Field, 0}, + {"IpMaskString", Type, 0}, + {"IpMaskString.String", Field, 2}, + {"Issetugid", Func, 0}, + {"KEY_ALL_ACCESS", Const, 0}, + {"KEY_CREATE_LINK", Const, 0}, + {"KEY_CREATE_SUB_KEY", Const, 0}, + {"KEY_ENUMERATE_SUB_KEYS", Const, 0}, + {"KEY_EXECUTE", Const, 0}, + {"KEY_NOTIFY", Const, 0}, + {"KEY_QUERY_VALUE", Const, 0}, + {"KEY_READ", Const, 0}, + {"KEY_SET_VALUE", Const, 0}, + {"KEY_WOW64_32KEY", Const, 0}, + {"KEY_WOW64_64KEY", Const, 0}, + {"KEY_WRITE", Const, 0}, + {"Kevent", Func, 0}, + {"Kevent_t", Type, 0}, + {"Kevent_t.Data", Field, 0}, + {"Kevent_t.Fflags", Field, 0}, + {"Kevent_t.Filter", Field, 0}, + {"Kevent_t.Flags", Field, 0}, + {"Kevent_t.Ident", Field, 0}, + {"Kevent_t.Pad_cgo_0", Field, 2}, + {"Kevent_t.Udata", Field, 0}, + {"Kill", Func, 0}, + {"Klogctl", Func, 0}, + {"Kqueue", Func, 0}, + {"LANG_ENGLISH", Const, 0}, + {"LAYERED_PROTOCOL", Const, 2}, + {"LCNT_OVERLOAD_FLUSH", Const, 1}, + {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_CAD_ON", Const, 0}, + {"LINUX_REBOOT_CMD_HALT", Const, 0}, + {"LINUX_REBOOT_CMD_KEXEC", Const, 0}, + {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART2", Const, 0}, + {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0}, + {"LINUX_REBOOT_MAGIC1", Const, 0}, + {"LINUX_REBOOT_MAGIC2", Const, 0}, + {"LOCK_EX", Const, 0}, + {"LOCK_NB", Const, 0}, + {"LOCK_SH", Const, 0}, + {"LOCK_UN", Const, 0}, + {"LazyDLL", Type, 0}, + {"LazyDLL.Name", Field, 0}, + {"LazyProc", Type, 0}, + {"LazyProc.Name", Field, 0}, + {"Lchown", Func, 0}, + {"Linger", Type, 0}, + {"Linger.Linger", Field, 0}, + {"Linger.Onoff", Field, 0}, + {"Link", Func, 0}, + {"Listen", Func, 0}, + {"Listxattr", Func, 1}, + {"LoadCancelIoEx", Func, 1}, + {"LoadConnectEx", Func, 1}, + {"LoadCreateSymbolicLink", Func, 4}, + {"LoadDLL", Func, 0}, + {"LoadGetAddrInfo", Func, 1}, + {"LoadLibrary", Func, 0}, + {"LoadSetFileCompletionNotificationModes", Func, 2}, + {"LocalFree", Func, 0}, + {"Log2phys_t", Type, 0}, + {"Log2phys_t.Contigbytes", Field, 0}, + {"Log2phys_t.Devoffset", Field, 0}, + {"Log2phys_t.Flags", Field, 0}, + {"LookupAccountName", Func, 0}, + {"LookupAccountSid", Func, 0}, + {"LookupSID", Func, 0}, + {"LsfJump", Func, 0}, + {"LsfSocket", Func, 0}, + {"LsfStmt", Func, 0}, + {"Lstat", Func, 0}, + {"MADV_AUTOSYNC", Const, 1}, + {"MADV_CAN_REUSE", Const, 0}, + {"MADV_CORE", Const, 1}, + {"MADV_DOFORK", Const, 0}, + {"MADV_DONTFORK", Const, 0}, + {"MADV_DONTNEED", Const, 0}, + {"MADV_FREE", Const, 0}, + {"MADV_FREE_REUSABLE", Const, 0}, + {"MADV_FREE_REUSE", Const, 0}, + {"MADV_HUGEPAGE", Const, 0}, + {"MADV_HWPOISON", Const, 0}, + {"MADV_MERGEABLE", Const, 0}, + {"MADV_NOCORE", Const, 1}, + {"MADV_NOHUGEPAGE", Const, 
0}, + {"MADV_NORMAL", Const, 0}, + {"MADV_NOSYNC", Const, 1}, + {"MADV_PROTECT", Const, 1}, + {"MADV_RANDOM", Const, 0}, + {"MADV_REMOVE", Const, 0}, + {"MADV_SEQUENTIAL", Const, 0}, + {"MADV_SPACEAVAIL", Const, 3}, + {"MADV_UNMERGEABLE", Const, 0}, + {"MADV_WILLNEED", Const, 0}, + {"MADV_ZERO_WIRED_PAGES", Const, 0}, + {"MAP_32BIT", Const, 0}, + {"MAP_ALIGNED_SUPER", Const, 3}, + {"MAP_ALIGNMENT_16MB", Const, 3}, + {"MAP_ALIGNMENT_1TB", Const, 3}, + {"MAP_ALIGNMENT_256TB", Const, 3}, + {"MAP_ALIGNMENT_4GB", Const, 3}, + {"MAP_ALIGNMENT_64KB", Const, 3}, + {"MAP_ALIGNMENT_64PB", Const, 3}, + {"MAP_ALIGNMENT_MASK", Const, 3}, + {"MAP_ALIGNMENT_SHIFT", Const, 3}, + {"MAP_ANON", Const, 0}, + {"MAP_ANONYMOUS", Const, 0}, + {"MAP_COPY", Const, 0}, + {"MAP_DENYWRITE", Const, 0}, + {"MAP_EXECUTABLE", Const, 0}, + {"MAP_FILE", Const, 0}, + {"MAP_FIXED", Const, 0}, + {"MAP_FLAGMASK", Const, 3}, + {"MAP_GROWSDOWN", Const, 0}, + {"MAP_HASSEMAPHORE", Const, 0}, + {"MAP_HUGETLB", Const, 0}, + {"MAP_INHERIT", Const, 3}, + {"MAP_INHERIT_COPY", Const, 3}, + {"MAP_INHERIT_DEFAULT", Const, 3}, + {"MAP_INHERIT_DONATE_COPY", Const, 3}, + {"MAP_INHERIT_NONE", Const, 3}, + {"MAP_INHERIT_SHARE", Const, 3}, + {"MAP_JIT", Const, 0}, + {"MAP_LOCKED", Const, 0}, + {"MAP_NOCACHE", Const, 0}, + {"MAP_NOCORE", Const, 1}, + {"MAP_NOEXTEND", Const, 0}, + {"MAP_NONBLOCK", Const, 0}, + {"MAP_NORESERVE", Const, 0}, + {"MAP_NOSYNC", Const, 1}, + {"MAP_POPULATE", Const, 0}, + {"MAP_PREFAULT_READ", Const, 1}, + {"MAP_PRIVATE", Const, 0}, + {"MAP_RENAME", Const, 0}, + {"MAP_RESERVED0080", Const, 0}, + {"MAP_RESERVED0100", Const, 1}, + {"MAP_SHARED", Const, 0}, + {"MAP_STACK", Const, 0}, + {"MAP_TRYFIXED", Const, 3}, + {"MAP_TYPE", Const, 0}, + {"MAP_WIRED", Const, 3}, + {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4}, + {"MAXLEN_IFDESCR", Const, 0}, + {"MAXLEN_PHYSADDR", Const, 0}, + {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0}, + {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0}, + {"MAX_ADAPTER_NAME_LENGTH", Const, 0}, + {"MAX_COMPUTERNAME_LENGTH", Const, 0}, + {"MAX_INTERFACE_NAME_LEN", Const, 0}, + {"MAX_LONG_PATH", Const, 0}, + {"MAX_PATH", Const, 0}, + {"MAX_PROTOCOL_CHAIN", Const, 2}, + {"MCL_CURRENT", Const, 0}, + {"MCL_FUTURE", Const, 0}, + {"MNT_DETACH", Const, 0}, + {"MNT_EXPIRE", Const, 0}, + {"MNT_FORCE", Const, 0}, + {"MSG_BCAST", Const, 1}, + {"MSG_CMSG_CLOEXEC", Const, 0}, + {"MSG_COMPAT", Const, 0}, + {"MSG_CONFIRM", Const, 0}, + {"MSG_CONTROLMBUF", Const, 1}, + {"MSG_CTRUNC", Const, 0}, + {"MSG_DONTROUTE", Const, 0}, + {"MSG_DONTWAIT", Const, 0}, + {"MSG_EOF", Const, 0}, + {"MSG_EOR", Const, 0}, + {"MSG_ERRQUEUE", Const, 0}, + {"MSG_FASTOPEN", Const, 1}, + {"MSG_FIN", Const, 0}, + {"MSG_FLUSH", Const, 0}, + {"MSG_HAVEMORE", Const, 0}, + {"MSG_HOLD", Const, 0}, + {"MSG_IOVUSRSPACE", Const, 1}, + {"MSG_LENUSRSPACE", Const, 1}, + {"MSG_MCAST", Const, 1}, + {"MSG_MORE", Const, 0}, + {"MSG_NAMEMBUF", Const, 1}, + {"MSG_NBIO", Const, 0}, + {"MSG_NEEDSA", Const, 0}, + {"MSG_NOSIGNAL", Const, 0}, + {"MSG_NOTIFICATION", Const, 0}, + {"MSG_OOB", Const, 0}, + {"MSG_PEEK", Const, 0}, + {"MSG_PROXY", Const, 0}, + {"MSG_RCVMORE", Const, 0}, + {"MSG_RST", Const, 0}, + {"MSG_SEND", Const, 0}, + {"MSG_SYN", Const, 0}, + {"MSG_TRUNC", Const, 0}, + {"MSG_TRYHARD", Const, 0}, + {"MSG_USERFLAGS", Const, 1}, + {"MSG_WAITALL", Const, 0}, + {"MSG_WAITFORONE", Const, 0}, + {"MSG_WAITSTREAM", Const, 0}, + {"MS_ACTIVE", Const, 0}, + {"MS_ASYNC", Const, 0}, + {"MS_BIND", Const, 0}, + {"MS_DEACTIVATE", Const, 0}, + {"MS_DIRSYNC", Const, 0}, + 
{"MS_INVALIDATE", Const, 0}, + {"MS_I_VERSION", Const, 0}, + {"MS_KERNMOUNT", Const, 0}, + {"MS_KILLPAGES", Const, 0}, + {"MS_MANDLOCK", Const, 0}, + {"MS_MGC_MSK", Const, 0}, + {"MS_MGC_VAL", Const, 0}, + {"MS_MOVE", Const, 0}, + {"MS_NOATIME", Const, 0}, + {"MS_NODEV", Const, 0}, + {"MS_NODIRATIME", Const, 0}, + {"MS_NOEXEC", Const, 0}, + {"MS_NOSUID", Const, 0}, + {"MS_NOUSER", Const, 0}, + {"MS_POSIXACL", Const, 0}, + {"MS_PRIVATE", Const, 0}, + {"MS_RDONLY", Const, 0}, + {"MS_REC", Const, 0}, + {"MS_RELATIME", Const, 0}, + {"MS_REMOUNT", Const, 0}, + {"MS_RMT_MASK", Const, 0}, + {"MS_SHARED", Const, 0}, + {"MS_SILENT", Const, 0}, + {"MS_SLAVE", Const, 0}, + {"MS_STRICTATIME", Const, 0}, + {"MS_SYNC", Const, 0}, + {"MS_SYNCHRONOUS", Const, 0}, + {"MS_UNBINDABLE", Const, 0}, + {"Madvise", Func, 0}, + {"MapViewOfFile", Func, 0}, + {"MaxTokenInfoClass", Const, 0}, + {"Mclpool", Type, 2}, + {"Mclpool.Alive", Field, 2}, + {"Mclpool.Cwm", Field, 2}, + {"Mclpool.Grown", Field, 2}, + {"Mclpool.Hwm", Field, 2}, + {"Mclpool.Lwm", Field, 2}, + {"MibIfRow", Type, 0}, + {"MibIfRow.AdminStatus", Field, 0}, + {"MibIfRow.Descr", Field, 0}, + {"MibIfRow.DescrLen", Field, 0}, + {"MibIfRow.InDiscards", Field, 0}, + {"MibIfRow.InErrors", Field, 0}, + {"MibIfRow.InNUcastPkts", Field, 0}, + {"MibIfRow.InOctets", Field, 0}, + {"MibIfRow.InUcastPkts", Field, 0}, + {"MibIfRow.InUnknownProtos", Field, 0}, + {"MibIfRow.Index", Field, 0}, + {"MibIfRow.LastChange", Field, 0}, + {"MibIfRow.Mtu", Field, 0}, + {"MibIfRow.Name", Field, 0}, + {"MibIfRow.OperStatus", Field, 0}, + {"MibIfRow.OutDiscards", Field, 0}, + {"MibIfRow.OutErrors", Field, 0}, + {"MibIfRow.OutNUcastPkts", Field, 0}, + {"MibIfRow.OutOctets", Field, 0}, + {"MibIfRow.OutQLen", Field, 0}, + {"MibIfRow.OutUcastPkts", Field, 0}, + {"MibIfRow.PhysAddr", Field, 0}, + {"MibIfRow.PhysAddrLen", Field, 0}, + {"MibIfRow.Speed", Field, 0}, + {"MibIfRow.Type", Field, 0}, + {"Mkdir", Func, 0}, + {"Mkdirat", Func, 0}, + {"Mkfifo", Func, 0}, + {"Mknod", Func, 0}, + {"Mknodat", Func, 0}, + {"Mlock", Func, 0}, + {"Mlockall", Func, 0}, + {"Mmap", Func, 0}, + {"Mount", Func, 0}, + {"MoveFile", Func, 0}, + {"Mprotect", Func, 0}, + {"Msghdr", Type, 0}, + {"Msghdr.Control", Field, 0}, + {"Msghdr.Controllen", Field, 0}, + {"Msghdr.Flags", Field, 0}, + {"Msghdr.Iov", Field, 0}, + {"Msghdr.Iovlen", Field, 0}, + {"Msghdr.Name", Field, 0}, + {"Msghdr.Namelen", Field, 0}, + {"Msghdr.Pad_cgo_0", Field, 0}, + {"Msghdr.Pad_cgo_1", Field, 0}, + {"Munlock", Func, 0}, + {"Munlockall", Func, 0}, + {"Munmap", Func, 0}, + {"MustLoadDLL", Func, 0}, + {"NAME_MAX", Const, 0}, + {"NETLINK_ADD_MEMBERSHIP", Const, 0}, + {"NETLINK_AUDIT", Const, 0}, + {"NETLINK_BROADCAST_ERROR", Const, 0}, + {"NETLINK_CONNECTOR", Const, 0}, + {"NETLINK_DNRTMSG", Const, 0}, + {"NETLINK_DROP_MEMBERSHIP", Const, 0}, + {"NETLINK_ECRYPTFS", Const, 0}, + {"NETLINK_FIB_LOOKUP", Const, 0}, + {"NETLINK_FIREWALL", Const, 0}, + {"NETLINK_GENERIC", Const, 0}, + {"NETLINK_INET_DIAG", Const, 0}, + {"NETLINK_IP6_FW", Const, 0}, + {"NETLINK_ISCSI", Const, 0}, + {"NETLINK_KOBJECT_UEVENT", Const, 0}, + {"NETLINK_NETFILTER", Const, 0}, + {"NETLINK_NFLOG", Const, 0}, + {"NETLINK_NO_ENOBUFS", Const, 0}, + {"NETLINK_PKTINFO", Const, 0}, + {"NETLINK_RDMA", Const, 0}, + {"NETLINK_ROUTE", Const, 0}, + {"NETLINK_SCSITRANSPORT", Const, 0}, + {"NETLINK_SELINUX", Const, 0}, + {"NETLINK_UNUSED", Const, 0}, + {"NETLINK_USERSOCK", Const, 0}, + {"NETLINK_XFRM", Const, 0}, + {"NET_RT_DUMP", Const, 0}, + {"NET_RT_DUMP2", Const, 0}, + 
{"NET_RT_FLAGS", Const, 0}, + {"NET_RT_IFLIST", Const, 0}, + {"NET_RT_IFLIST2", Const, 0}, + {"NET_RT_IFLISTL", Const, 1}, + {"NET_RT_IFMALIST", Const, 0}, + {"NET_RT_MAXID", Const, 0}, + {"NET_RT_OIFLIST", Const, 1}, + {"NET_RT_OOIFLIST", Const, 1}, + {"NET_RT_STAT", Const, 0}, + {"NET_RT_STATS", Const, 1}, + {"NET_RT_TABLE", Const, 1}, + {"NET_RT_TRASH", Const, 0}, + {"NLA_ALIGNTO", Const, 0}, + {"NLA_F_NESTED", Const, 0}, + {"NLA_F_NET_BYTEORDER", Const, 0}, + {"NLA_HDRLEN", Const, 0}, + {"NLMSG_ALIGNTO", Const, 0}, + {"NLMSG_DONE", Const, 0}, + {"NLMSG_ERROR", Const, 0}, + {"NLMSG_HDRLEN", Const, 0}, + {"NLMSG_MIN_TYPE", Const, 0}, + {"NLMSG_NOOP", Const, 0}, + {"NLMSG_OVERRUN", Const, 0}, + {"NLM_F_ACK", Const, 0}, + {"NLM_F_APPEND", Const, 0}, + {"NLM_F_ATOMIC", Const, 0}, + {"NLM_F_CREATE", Const, 0}, + {"NLM_F_DUMP", Const, 0}, + {"NLM_F_ECHO", Const, 0}, + {"NLM_F_EXCL", Const, 0}, + {"NLM_F_MATCH", Const, 0}, + {"NLM_F_MULTI", Const, 0}, + {"NLM_F_REPLACE", Const, 0}, + {"NLM_F_REQUEST", Const, 0}, + {"NLM_F_ROOT", Const, 0}, + {"NOFLSH", Const, 0}, + {"NOTE_ABSOLUTE", Const, 0}, + {"NOTE_ATTRIB", Const, 0}, + {"NOTE_BACKGROUND", Const, 16}, + {"NOTE_CHILD", Const, 0}, + {"NOTE_CRITICAL", Const, 16}, + {"NOTE_DELETE", Const, 0}, + {"NOTE_EOF", Const, 1}, + {"NOTE_EXEC", Const, 0}, + {"NOTE_EXIT", Const, 0}, + {"NOTE_EXITSTATUS", Const, 0}, + {"NOTE_EXIT_CSERROR", Const, 16}, + {"NOTE_EXIT_DECRYPTFAIL", Const, 16}, + {"NOTE_EXIT_DETAIL", Const, 16}, + {"NOTE_EXIT_DETAIL_MASK", Const, 16}, + {"NOTE_EXIT_MEMORY", Const, 16}, + {"NOTE_EXIT_REPARENTED", Const, 16}, + {"NOTE_EXTEND", Const, 0}, + {"NOTE_FFAND", Const, 0}, + {"NOTE_FFCOPY", Const, 0}, + {"NOTE_FFCTRLMASK", Const, 0}, + {"NOTE_FFLAGSMASK", Const, 0}, + {"NOTE_FFNOP", Const, 0}, + {"NOTE_FFOR", Const, 0}, + {"NOTE_FORK", Const, 0}, + {"NOTE_LEEWAY", Const, 16}, + {"NOTE_LINK", Const, 0}, + {"NOTE_LOWAT", Const, 0}, + {"NOTE_NONE", Const, 0}, + {"NOTE_NSECONDS", Const, 0}, + {"NOTE_PCTRLMASK", Const, 0}, + {"NOTE_PDATAMASK", Const, 0}, + {"NOTE_REAP", Const, 0}, + {"NOTE_RENAME", Const, 0}, + {"NOTE_RESOURCEEND", Const, 0}, + {"NOTE_REVOKE", Const, 0}, + {"NOTE_SECONDS", Const, 0}, + {"NOTE_SIGNAL", Const, 0}, + {"NOTE_TRACK", Const, 0}, + {"NOTE_TRACKERR", Const, 0}, + {"NOTE_TRIGGER", Const, 0}, + {"NOTE_TRUNCATE", Const, 1}, + {"NOTE_USECONDS", Const, 0}, + {"NOTE_VM_ERROR", Const, 0}, + {"NOTE_VM_PRESSURE", Const, 0}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0}, + {"NOTE_VM_PRESSURE_TERMINATE", Const, 0}, + {"NOTE_WRITE", Const, 0}, + {"NameCanonical", Const, 0}, + {"NameCanonicalEx", Const, 0}, + {"NameDisplay", Const, 0}, + {"NameDnsDomain", Const, 0}, + {"NameFullyQualifiedDN", Const, 0}, + {"NameSamCompatible", Const, 0}, + {"NameServicePrincipal", Const, 0}, + {"NameUniqueId", Const, 0}, + {"NameUnknown", Const, 0}, + {"NameUserPrincipal", Const, 0}, + {"Nanosleep", Func, 0}, + {"NetApiBufferFree", Func, 0}, + {"NetGetJoinInformation", Func, 2}, + {"NetSetupDomainName", Const, 2}, + {"NetSetupUnjoined", Const, 2}, + {"NetSetupUnknownStatus", Const, 2}, + {"NetSetupWorkgroupName", Const, 2}, + {"NetUserGetInfo", Func, 0}, + {"NetlinkMessage", Type, 0}, + {"NetlinkMessage.Data", Field, 0}, + {"NetlinkMessage.Header", Field, 0}, + {"NetlinkRIB", Func, 0}, + {"NetlinkRouteAttr", Type, 0}, + {"NetlinkRouteAttr.Attr", Field, 0}, + {"NetlinkRouteAttr.Value", Field, 0}, + {"NetlinkRouteRequest", Type, 0}, + {"NetlinkRouteRequest.Data", Field, 0}, + {"NetlinkRouteRequest.Header", Field, 0}, + {"NewCallback", Func, 
0}, + {"NewCallbackCDecl", Func, 3}, + {"NewLazyDLL", Func, 0}, + {"NlAttr", Type, 0}, + {"NlAttr.Len", Field, 0}, + {"NlAttr.Type", Field, 0}, + {"NlMsgerr", Type, 0}, + {"NlMsgerr.Error", Field, 0}, + {"NlMsgerr.Msg", Field, 0}, + {"NlMsghdr", Type, 0}, + {"NlMsghdr.Flags", Field, 0}, + {"NlMsghdr.Len", Field, 0}, + {"NlMsghdr.Pid", Field, 0}, + {"NlMsghdr.Seq", Field, 0}, + {"NlMsghdr.Type", Field, 0}, + {"NsecToFiletime", Func, 0}, + {"NsecToTimespec", Func, 0}, + {"NsecToTimeval", Func, 0}, + {"Ntohs", Func, 0}, + {"OCRNL", Const, 0}, + {"OFDEL", Const, 0}, + {"OFILL", Const, 0}, + {"OFIOGETBMAP", Const, 1}, + {"OID_PKIX_KP_SERVER_AUTH", Var, 0}, + {"OID_SERVER_GATED_CRYPTO", Var, 0}, + {"OID_SGC_NETSCAPE", Var, 0}, + {"OLCUC", Const, 0}, + {"ONLCR", Const, 0}, + {"ONLRET", Const, 0}, + {"ONOCR", Const, 0}, + {"ONOEOT", Const, 1}, + {"OPEN_ALWAYS", Const, 0}, + {"OPEN_EXISTING", Const, 0}, + {"OPOST", Const, 0}, + {"O_ACCMODE", Const, 0}, + {"O_ALERT", Const, 0}, + {"O_ALT_IO", Const, 1}, + {"O_APPEND", Const, 0}, + {"O_ASYNC", Const, 0}, + {"O_CLOEXEC", Const, 0}, + {"O_CREAT", Const, 0}, + {"O_DIRECT", Const, 0}, + {"O_DIRECTORY", Const, 0}, + {"O_DP_GETRAWENCRYPTED", Const, 16}, + {"O_DSYNC", Const, 0}, + {"O_EVTONLY", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_EXEC", Const, 0}, + {"O_EXLOCK", Const, 0}, + {"O_FSYNC", Const, 0}, + {"O_LARGEFILE", Const, 0}, + {"O_NDELAY", Const, 0}, + {"O_NOATIME", Const, 0}, + {"O_NOCTTY", Const, 0}, + {"O_NOFOLLOW", Const, 0}, + {"O_NONBLOCK", Const, 0}, + {"O_NOSIGPIPE", Const, 1}, + {"O_POPUP", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_RSYNC", Const, 0}, + {"O_SHLOCK", Const, 0}, + {"O_SYMLINK", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_TTY_INIT", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenCurrentProcessToken", Func, 0}, + {"OpenProcess", Func, 0}, + {"OpenProcessToken", Func, 0}, + {"Openat", Func, 0}, + {"Overlapped", Type, 0}, + {"Overlapped.HEvent", Field, 0}, + {"Overlapped.Internal", Field, 0}, + {"Overlapped.InternalHigh", Field, 0}, + {"Overlapped.Offset", Field, 0}, + {"Overlapped.OffsetHigh", Field, 0}, + {"PACKET_ADD_MEMBERSHIP", Const, 0}, + {"PACKET_BROADCAST", Const, 0}, + {"PACKET_DROP_MEMBERSHIP", Const, 0}, + {"PACKET_FASTROUTE", Const, 0}, + {"PACKET_HOST", Const, 0}, + {"PACKET_LOOPBACK", Const, 0}, + {"PACKET_MR_ALLMULTI", Const, 0}, + {"PACKET_MR_MULTICAST", Const, 0}, + {"PACKET_MR_PROMISC", Const, 0}, + {"PACKET_MULTICAST", Const, 0}, + {"PACKET_OTHERHOST", Const, 0}, + {"PACKET_OUTGOING", Const, 0}, + {"PACKET_RECV_OUTPUT", Const, 0}, + {"PACKET_RX_RING", Const, 0}, + {"PACKET_STATISTICS", Const, 0}, + {"PAGE_EXECUTE_READ", Const, 0}, + {"PAGE_EXECUTE_READWRITE", Const, 0}, + {"PAGE_EXECUTE_WRITECOPY", Const, 0}, + {"PAGE_READONLY", Const, 0}, + {"PAGE_READWRITE", Const, 0}, + {"PAGE_WRITECOPY", Const, 0}, + {"PARENB", Const, 0}, + {"PARMRK", Const, 0}, + {"PARODD", Const, 0}, + {"PENDIN", Const, 0}, + {"PFL_HIDDEN", Const, 2}, + {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2}, + {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2}, + {"PFL_NETWORKDIRECT_PROVIDER", Const, 2}, + {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2}, + {"PF_FLUSH", Const, 1}, + {"PKCS_7_ASN_ENCODING", Const, 0}, + {"PMC5_PIPELINE_FLUSH", Const, 1}, + {"PRIO_PGRP", Const, 2}, + {"PRIO_PROCESS", Const, 2}, + {"PRIO_USER", Const, 2}, + {"PRI_IOFLUSH", Const, 1}, + {"PROCESS_QUERY_INFORMATION", Const, 0}, + {"PROCESS_TERMINATE", Const, 2}, + {"PROT_EXEC", Const, 0}, + {"PROT_GROWSDOWN", Const, 0}, + 
{"PROT_GROWSUP", Const, 0}, + {"PROT_NONE", Const, 0}, + {"PROT_READ", Const, 0}, + {"PROT_WRITE", Const, 0}, + {"PROV_DH_SCHANNEL", Const, 0}, + {"PROV_DSS", Const, 0}, + {"PROV_DSS_DH", Const, 0}, + {"PROV_EC_ECDSA_FULL", Const, 0}, + {"PROV_EC_ECDSA_SIG", Const, 0}, + {"PROV_EC_ECNRA_FULL", Const, 0}, + {"PROV_EC_ECNRA_SIG", Const, 0}, + {"PROV_FORTEZZA", Const, 0}, + {"PROV_INTEL_SEC", Const, 0}, + {"PROV_MS_EXCHANGE", Const, 0}, + {"PROV_REPLACE_OWF", Const, 0}, + {"PROV_RNG", Const, 0}, + {"PROV_RSA_AES", Const, 0}, + {"PROV_RSA_FULL", Const, 0}, + {"PROV_RSA_SCHANNEL", Const, 0}, + {"PROV_RSA_SIG", Const, 0}, + {"PROV_SPYRUS_LYNKS", Const, 0}, + {"PROV_SSL", Const, 0}, + {"PR_CAPBSET_DROP", Const, 0}, + {"PR_CAPBSET_READ", Const, 0}, + {"PR_CLEAR_SECCOMP_FILTER", Const, 0}, + {"PR_ENDIAN_BIG", Const, 0}, + {"PR_ENDIAN_LITTLE", Const, 0}, + {"PR_ENDIAN_PPC_LITTLE", Const, 0}, + {"PR_FPEMU_NOPRINT", Const, 0}, + {"PR_FPEMU_SIGFPE", Const, 0}, + {"PR_FP_EXC_ASYNC", Const, 0}, + {"PR_FP_EXC_DISABLED", Const, 0}, + {"PR_FP_EXC_DIV", Const, 0}, + {"PR_FP_EXC_INV", Const, 0}, + {"PR_FP_EXC_NONRECOV", Const, 0}, + {"PR_FP_EXC_OVF", Const, 0}, + {"PR_FP_EXC_PRECISE", Const, 0}, + {"PR_FP_EXC_RES", Const, 0}, + {"PR_FP_EXC_SW_ENABLE", Const, 0}, + {"PR_FP_EXC_UND", Const, 0}, + {"PR_GET_DUMPABLE", Const, 0}, + {"PR_GET_ENDIAN", Const, 0}, + {"PR_GET_FPEMU", Const, 0}, + {"PR_GET_FPEXC", Const, 0}, + {"PR_GET_KEEPCAPS", Const, 0}, + {"PR_GET_NAME", Const, 0}, + {"PR_GET_PDEATHSIG", Const, 0}, + {"PR_GET_SECCOMP", Const, 0}, + {"PR_GET_SECCOMP_FILTER", Const, 0}, + {"PR_GET_SECUREBITS", Const, 0}, + {"PR_GET_TIMERSLACK", Const, 0}, + {"PR_GET_TIMING", Const, 0}, + {"PR_GET_TSC", Const, 0}, + {"PR_GET_UNALIGN", Const, 0}, + {"PR_MCE_KILL", Const, 0}, + {"PR_MCE_KILL_CLEAR", Const, 0}, + {"PR_MCE_KILL_DEFAULT", Const, 0}, + {"PR_MCE_KILL_EARLY", Const, 0}, + {"PR_MCE_KILL_GET", Const, 0}, + {"PR_MCE_KILL_LATE", Const, 0}, + {"PR_MCE_KILL_SET", Const, 0}, + {"PR_SECCOMP_FILTER_EVENT", Const, 0}, + {"PR_SECCOMP_FILTER_SYSCALL", Const, 0}, + {"PR_SET_DUMPABLE", Const, 0}, + {"PR_SET_ENDIAN", Const, 0}, + {"PR_SET_FPEMU", Const, 0}, + {"PR_SET_FPEXC", Const, 0}, + {"PR_SET_KEEPCAPS", Const, 0}, + {"PR_SET_NAME", Const, 0}, + {"PR_SET_PDEATHSIG", Const, 0}, + {"PR_SET_PTRACER", Const, 0}, + {"PR_SET_SECCOMP", Const, 0}, + {"PR_SET_SECCOMP_FILTER", Const, 0}, + {"PR_SET_SECUREBITS", Const, 0}, + {"PR_SET_TIMERSLACK", Const, 0}, + {"PR_SET_TIMING", Const, 0}, + {"PR_SET_TSC", Const, 0}, + {"PR_SET_UNALIGN", Const, 0}, + {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0}, + {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0}, + {"PR_TIMING_STATISTICAL", Const, 0}, + {"PR_TIMING_TIMESTAMP", Const, 0}, + {"PR_TSC_ENABLE", Const, 0}, + {"PR_TSC_SIGSEGV", Const, 0}, + {"PR_UNALIGN_NOPRINT", Const, 0}, + {"PR_UNALIGN_SIGBUS", Const, 0}, + {"PTRACE_ARCH_PRCTL", Const, 0}, + {"PTRACE_ATTACH", Const, 0}, + {"PTRACE_CONT", Const, 0}, + {"PTRACE_DETACH", Const, 0}, + {"PTRACE_EVENT_CLONE", Const, 0}, + {"PTRACE_EVENT_EXEC", Const, 0}, + {"PTRACE_EVENT_EXIT", Const, 0}, + {"PTRACE_EVENT_FORK", Const, 0}, + {"PTRACE_EVENT_VFORK", Const, 0}, + {"PTRACE_EVENT_VFORK_DONE", Const, 0}, + {"PTRACE_GETCRUNCHREGS", Const, 0}, + {"PTRACE_GETEVENTMSG", Const, 0}, + {"PTRACE_GETFPREGS", Const, 0}, + {"PTRACE_GETFPXREGS", Const, 0}, + {"PTRACE_GETHBPREGS", Const, 0}, + {"PTRACE_GETREGS", Const, 0}, + {"PTRACE_GETREGSET", Const, 0}, + {"PTRACE_GETSIGINFO", Const, 0}, + {"PTRACE_GETVFPREGS", Const, 0}, + {"PTRACE_GETWMMXREGS", Const, 0}, + 
{"PTRACE_GET_THREAD_AREA", Const, 0}, + {"PTRACE_KILL", Const, 0}, + {"PTRACE_OLDSETOPTIONS", Const, 0}, + {"PTRACE_O_MASK", Const, 0}, + {"PTRACE_O_TRACECLONE", Const, 0}, + {"PTRACE_O_TRACEEXEC", Const, 0}, + {"PTRACE_O_TRACEEXIT", Const, 0}, + {"PTRACE_O_TRACEFORK", Const, 0}, + {"PTRACE_O_TRACESYSGOOD", Const, 0}, + {"PTRACE_O_TRACEVFORK", Const, 0}, + {"PTRACE_O_TRACEVFORKDONE", Const, 0}, + {"PTRACE_PEEKDATA", Const, 0}, + {"PTRACE_PEEKTEXT", Const, 0}, + {"PTRACE_PEEKUSR", Const, 0}, + {"PTRACE_POKEDATA", Const, 0}, + {"PTRACE_POKETEXT", Const, 0}, + {"PTRACE_POKEUSR", Const, 0}, + {"PTRACE_SETCRUNCHREGS", Const, 0}, + {"PTRACE_SETFPREGS", Const, 0}, + {"PTRACE_SETFPXREGS", Const, 0}, + {"PTRACE_SETHBPREGS", Const, 0}, + {"PTRACE_SETOPTIONS", Const, 0}, + {"PTRACE_SETREGS", Const, 0}, + {"PTRACE_SETREGSET", Const, 0}, + {"PTRACE_SETSIGINFO", Const, 0}, + {"PTRACE_SETVFPREGS", Const, 0}, + {"PTRACE_SETWMMXREGS", Const, 0}, + {"PTRACE_SET_SYSCALL", Const, 0}, + {"PTRACE_SET_THREAD_AREA", Const, 0}, + {"PTRACE_SINGLEBLOCK", Const, 0}, + {"PTRACE_SINGLESTEP", Const, 0}, + {"PTRACE_SYSCALL", Const, 0}, + {"PTRACE_SYSEMU", Const, 0}, + {"PTRACE_SYSEMU_SINGLESTEP", Const, 0}, + {"PTRACE_TRACEME", Const, 0}, + {"PT_ATTACH", Const, 0}, + {"PT_ATTACHEXC", Const, 0}, + {"PT_CONTINUE", Const, 0}, + {"PT_DATA_ADDR", Const, 0}, + {"PT_DENY_ATTACH", Const, 0}, + {"PT_DETACH", Const, 0}, + {"PT_FIRSTMACH", Const, 0}, + {"PT_FORCEQUOTA", Const, 0}, + {"PT_KILL", Const, 0}, + {"PT_MASK", Const, 1}, + {"PT_READ_D", Const, 0}, + {"PT_READ_I", Const, 0}, + {"PT_READ_U", Const, 0}, + {"PT_SIGEXC", Const, 0}, + {"PT_STEP", Const, 0}, + {"PT_TEXT_ADDR", Const, 0}, + {"PT_TEXT_END_ADDR", Const, 0}, + {"PT_THUPDATE", Const, 0}, + {"PT_TRACE_ME", Const, 0}, + {"PT_WRITE_D", Const, 0}, + {"PT_WRITE_I", Const, 0}, + {"PT_WRITE_U", Const, 0}, + {"ParseDirent", Func, 0}, + {"ParseNetlinkMessage", Func, 0}, + {"ParseNetlinkRouteAttr", Func, 0}, + {"ParseRoutingMessage", Func, 0}, + {"ParseRoutingSockaddr", Func, 0}, + {"ParseSocketControlMessage", Func, 0}, + {"ParseUnixCredentials", Func, 0}, + {"ParseUnixRights", Func, 0}, + {"PathMax", Const, 0}, + {"Pathconf", Func, 0}, + {"Pause", Func, 0}, + {"Pipe", Func, 0}, + {"Pipe2", Func, 1}, + {"PivotRoot", Func, 0}, + {"Pointer", Type, 11}, + {"PostQueuedCompletionStatus", Func, 0}, + {"Pread", Func, 0}, + {"Proc", Type, 0}, + {"Proc.Dll", Field, 0}, + {"Proc.Name", Field, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process32First", Func, 4}, + {"Process32Next", Func, 4}, + {"ProcessEntry32", Type, 4}, + {"ProcessEntry32.DefaultHeapID", Field, 4}, + {"ProcessEntry32.ExeFile", Field, 4}, + {"ProcessEntry32.Flags", Field, 4}, + {"ProcessEntry32.ModuleID", Field, 4}, + {"ProcessEntry32.ParentProcessID", Field, 4}, + {"ProcessEntry32.PriClassBase", Field, 4}, + {"ProcessEntry32.ProcessID", Field, 4}, + {"ProcessEntry32.Size", Field, 4}, + {"ProcessEntry32.Threads", Field, 4}, + {"ProcessEntry32.Usage", Field, 4}, + {"ProcessInformation", Type, 0}, + {"ProcessInformation.Process", Field, 0}, + {"ProcessInformation.ProcessId", Field, 0}, + {"ProcessInformation.Thread", Field, 0}, + {"ProcessInformation.ThreadId", Field, 0}, + {"Protoent", Type, 0}, + {"Protoent.Aliases", Field, 0}, + {"Protoent.Name", Field, 0}, + {"Protoent.Proto", Field, 0}, + {"PtraceAttach", Func, 0}, + {"PtraceCont", Func, 0}, + {"PtraceDetach", Func, 0}, + {"PtraceGetEventMsg", Func, 0}, + 
{"PtraceGetRegs", Func, 0}, + {"PtracePeekData", Func, 0}, + {"PtracePeekText", Func, 0}, + {"PtracePokeData", Func, 0}, + {"PtracePokeText", Func, 0}, + {"PtraceRegs", Type, 0}, + {"PtraceRegs.Cs", Field, 0}, + {"PtraceRegs.Ds", Field, 0}, + {"PtraceRegs.Eax", Field, 0}, + {"PtraceRegs.Ebp", Field, 0}, + {"PtraceRegs.Ebx", Field, 0}, + {"PtraceRegs.Ecx", Field, 0}, + {"PtraceRegs.Edi", Field, 0}, + {"PtraceRegs.Edx", Field, 0}, + {"PtraceRegs.Eflags", Field, 0}, + {"PtraceRegs.Eip", Field, 0}, + {"PtraceRegs.Es", Field, 0}, + {"PtraceRegs.Esi", Field, 0}, + {"PtraceRegs.Esp", Field, 0}, + {"PtraceRegs.Fs", Field, 0}, + {"PtraceRegs.Fs_base", Field, 0}, + {"PtraceRegs.Gs", Field, 0}, + {"PtraceRegs.Gs_base", Field, 0}, + {"PtraceRegs.Orig_eax", Field, 0}, + {"PtraceRegs.Orig_rax", Field, 0}, + {"PtraceRegs.R10", Field, 0}, + {"PtraceRegs.R11", Field, 0}, + {"PtraceRegs.R12", Field, 0}, + {"PtraceRegs.R13", Field, 0}, + {"PtraceRegs.R14", Field, 0}, + {"PtraceRegs.R15", Field, 0}, + {"PtraceRegs.R8", Field, 0}, + {"PtraceRegs.R9", Field, 0}, + {"PtraceRegs.Rax", Field, 0}, + {"PtraceRegs.Rbp", Field, 0}, + {"PtraceRegs.Rbx", Field, 0}, + {"PtraceRegs.Rcx", Field, 0}, + {"PtraceRegs.Rdi", Field, 0}, + {"PtraceRegs.Rdx", Field, 0}, + {"PtraceRegs.Rip", Field, 0}, + {"PtraceRegs.Rsi", Field, 0}, + {"PtraceRegs.Rsp", Field, 0}, + {"PtraceRegs.Ss", Field, 0}, + {"PtraceRegs.Uregs", Field, 0}, + {"PtraceRegs.Xcs", Field, 0}, + {"PtraceRegs.Xds", Field, 0}, + {"PtraceRegs.Xes", Field, 0}, + {"PtraceRegs.Xfs", Field, 0}, + {"PtraceRegs.Xgs", Field, 0}, + {"PtraceRegs.Xss", Field, 0}, + {"PtraceSetOptions", Func, 0}, + {"PtraceSetRegs", Func, 0}, + {"PtraceSingleStep", Func, 0}, + {"PtraceSyscall", Func, 1}, + {"Pwrite", Func, 0}, + {"REG_BINARY", Const, 0}, + {"REG_DWORD", Const, 0}, + {"REG_DWORD_BIG_ENDIAN", Const, 0}, + {"REG_DWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_EXPAND_SZ", Const, 0}, + {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0}, + {"REG_LINK", Const, 0}, + {"REG_MULTI_SZ", Const, 0}, + {"REG_NONE", Const, 0}, + {"REG_QWORD", Const, 0}, + {"REG_QWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_RESOURCE_LIST", Const, 0}, + {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0}, + {"REG_SZ", Const, 0}, + {"RLIMIT_AS", Const, 0}, + {"RLIMIT_CORE", Const, 0}, + {"RLIMIT_CPU", Const, 0}, + {"RLIMIT_CPU_USAGE_MONITOR", Const, 16}, + {"RLIMIT_DATA", Const, 0}, + {"RLIMIT_FSIZE", Const, 0}, + {"RLIMIT_NOFILE", Const, 0}, + {"RLIMIT_STACK", Const, 0}, + {"RLIM_INFINITY", Const, 0}, + {"RTAX_ADVMSS", Const, 0}, + {"RTAX_AUTHOR", Const, 0}, + {"RTAX_BRD", Const, 0}, + {"RTAX_CWND", Const, 0}, + {"RTAX_DST", Const, 0}, + {"RTAX_FEATURES", Const, 0}, + {"RTAX_FEATURE_ALLFRAG", Const, 0}, + {"RTAX_FEATURE_ECN", Const, 0}, + {"RTAX_FEATURE_SACK", Const, 0}, + {"RTAX_FEATURE_TIMESTAMP", Const, 0}, + {"RTAX_GATEWAY", Const, 0}, + {"RTAX_GENMASK", Const, 0}, + {"RTAX_HOPLIMIT", Const, 0}, + {"RTAX_IFA", Const, 0}, + {"RTAX_IFP", Const, 0}, + {"RTAX_INITCWND", Const, 0}, + {"RTAX_INITRWND", Const, 0}, + {"RTAX_LABEL", Const, 1}, + {"RTAX_LOCK", Const, 0}, + {"RTAX_MAX", Const, 0}, + {"RTAX_MTU", Const, 0}, + {"RTAX_NETMASK", Const, 0}, + {"RTAX_REORDERING", Const, 0}, + {"RTAX_RTO_MIN", Const, 0}, + {"RTAX_RTT", Const, 0}, + {"RTAX_RTTVAR", Const, 0}, + {"RTAX_SRC", Const, 1}, + {"RTAX_SRCMASK", Const, 1}, + {"RTAX_SSTHRESH", Const, 0}, + {"RTAX_TAG", Const, 1}, + {"RTAX_UNSPEC", Const, 0}, + {"RTAX_WINDOW", Const, 0}, + {"RTA_ALIGNTO", Const, 0}, + {"RTA_AUTHOR", Const, 0}, + {"RTA_BRD", Const, 0}, + {"RTA_CACHEINFO", Const, 
0}, + {"RTA_DST", Const, 0}, + {"RTA_FLOW", Const, 0}, + {"RTA_GATEWAY", Const, 0}, + {"RTA_GENMASK", Const, 0}, + {"RTA_IFA", Const, 0}, + {"RTA_IFP", Const, 0}, + {"RTA_IIF", Const, 0}, + {"RTA_LABEL", Const, 1}, + {"RTA_MAX", Const, 0}, + {"RTA_METRICS", Const, 0}, + {"RTA_MULTIPATH", Const, 0}, + {"RTA_NETMASK", Const, 0}, + {"RTA_OIF", Const, 0}, + {"RTA_PREFSRC", Const, 0}, + {"RTA_PRIORITY", Const, 0}, + {"RTA_SRC", Const, 0}, + {"RTA_SRCMASK", Const, 1}, + {"RTA_TABLE", Const, 0}, + {"RTA_TAG", Const, 1}, + {"RTA_UNSPEC", Const, 0}, + {"RTCF_DIRECTSRC", Const, 0}, + {"RTCF_DOREDIRECT", Const, 0}, + {"RTCF_LOG", Const, 0}, + {"RTCF_MASQ", Const, 0}, + {"RTCF_NAT", Const, 0}, + {"RTCF_VALVE", Const, 0}, + {"RTF_ADDRCLASSMASK", Const, 0}, + {"RTF_ADDRCONF", Const, 0}, + {"RTF_ALLONLINK", Const, 0}, + {"RTF_ANNOUNCE", Const, 1}, + {"RTF_BLACKHOLE", Const, 0}, + {"RTF_BROADCAST", Const, 0}, + {"RTF_CACHE", Const, 0}, + {"RTF_CLONED", Const, 1}, + {"RTF_CLONING", Const, 0}, + {"RTF_CONDEMNED", Const, 0}, + {"RTF_DEFAULT", Const, 0}, + {"RTF_DELCLONE", Const, 0}, + {"RTF_DONE", Const, 0}, + {"RTF_DYNAMIC", Const, 0}, + {"RTF_FLOW", Const, 0}, + {"RTF_FMASK", Const, 0}, + {"RTF_GATEWAY", Const, 0}, + {"RTF_GWFLAG_COMPAT", Const, 3}, + {"RTF_HOST", Const, 0}, + {"RTF_IFREF", Const, 0}, + {"RTF_IFSCOPE", Const, 0}, + {"RTF_INTERFACE", Const, 0}, + {"RTF_IRTT", Const, 0}, + {"RTF_LINKRT", Const, 0}, + {"RTF_LLDATA", Const, 0}, + {"RTF_LLINFO", Const, 0}, + {"RTF_LOCAL", Const, 0}, + {"RTF_MASK", Const, 1}, + {"RTF_MODIFIED", Const, 0}, + {"RTF_MPATH", Const, 1}, + {"RTF_MPLS", Const, 1}, + {"RTF_MSS", Const, 0}, + {"RTF_MTU", Const, 0}, + {"RTF_MULTICAST", Const, 0}, + {"RTF_NAT", Const, 0}, + {"RTF_NOFORWARD", Const, 0}, + {"RTF_NONEXTHOP", Const, 0}, + {"RTF_NOPMTUDISC", Const, 0}, + {"RTF_PERMANENT_ARP", Const, 1}, + {"RTF_PINNED", Const, 0}, + {"RTF_POLICY", Const, 0}, + {"RTF_PRCLONING", Const, 0}, + {"RTF_PROTO1", Const, 0}, + {"RTF_PROTO2", Const, 0}, + {"RTF_PROTO3", Const, 0}, + {"RTF_PROXY", Const, 16}, + {"RTF_REINSTATE", Const, 0}, + {"RTF_REJECT", Const, 0}, + {"RTF_RNH_LOCKED", Const, 0}, + {"RTF_ROUTER", Const, 16}, + {"RTF_SOURCE", Const, 1}, + {"RTF_SRC", Const, 1}, + {"RTF_STATIC", Const, 0}, + {"RTF_STICKY", Const, 0}, + {"RTF_THROW", Const, 0}, + {"RTF_TUNNEL", Const, 1}, + {"RTF_UP", Const, 0}, + {"RTF_USETRAILERS", Const, 1}, + {"RTF_WASCLONED", Const, 0}, + {"RTF_WINDOW", Const, 0}, + {"RTF_XRESOLVE", Const, 0}, + {"RTM_ADD", Const, 0}, + {"RTM_BASE", Const, 0}, + {"RTM_CHANGE", Const, 0}, + {"RTM_CHGADDR", Const, 1}, + {"RTM_DELACTION", Const, 0}, + {"RTM_DELADDR", Const, 0}, + {"RTM_DELADDRLABEL", Const, 0}, + {"RTM_DELETE", Const, 0}, + {"RTM_DELLINK", Const, 0}, + {"RTM_DELMADDR", Const, 0}, + {"RTM_DELNEIGH", Const, 0}, + {"RTM_DELQDISC", Const, 0}, + {"RTM_DELROUTE", Const, 0}, + {"RTM_DELRULE", Const, 0}, + {"RTM_DELTCLASS", Const, 0}, + {"RTM_DELTFILTER", Const, 0}, + {"RTM_DESYNC", Const, 1}, + {"RTM_F_CLONED", Const, 0}, + {"RTM_F_EQUALIZE", Const, 0}, + {"RTM_F_NOTIFY", Const, 0}, + {"RTM_F_PREFIX", Const, 0}, + {"RTM_GET", Const, 0}, + {"RTM_GET2", Const, 0}, + {"RTM_GETACTION", Const, 0}, + {"RTM_GETADDR", Const, 0}, + {"RTM_GETADDRLABEL", Const, 0}, + {"RTM_GETANYCAST", Const, 0}, + {"RTM_GETDCB", Const, 0}, + {"RTM_GETLINK", Const, 0}, + {"RTM_GETMULTICAST", Const, 0}, + {"RTM_GETNEIGH", Const, 0}, + {"RTM_GETNEIGHTBL", Const, 0}, + {"RTM_GETQDISC", Const, 0}, + {"RTM_GETROUTE", Const, 0}, + {"RTM_GETRULE", Const, 0}, + {"RTM_GETTCLASS", Const, 0}, + 
{"RTM_GETTFILTER", Const, 0}, + {"RTM_IEEE80211", Const, 0}, + {"RTM_IFANNOUNCE", Const, 0}, + {"RTM_IFINFO", Const, 0}, + {"RTM_IFINFO2", Const, 0}, + {"RTM_LLINFO_UPD", Const, 1}, + {"RTM_LOCK", Const, 0}, + {"RTM_LOSING", Const, 0}, + {"RTM_MAX", Const, 0}, + {"RTM_MAXSIZE", Const, 1}, + {"RTM_MISS", Const, 0}, + {"RTM_NEWACTION", Const, 0}, + {"RTM_NEWADDR", Const, 0}, + {"RTM_NEWADDRLABEL", Const, 0}, + {"RTM_NEWLINK", Const, 0}, + {"RTM_NEWMADDR", Const, 0}, + {"RTM_NEWMADDR2", Const, 0}, + {"RTM_NEWNDUSEROPT", Const, 0}, + {"RTM_NEWNEIGH", Const, 0}, + {"RTM_NEWNEIGHTBL", Const, 0}, + {"RTM_NEWPREFIX", Const, 0}, + {"RTM_NEWQDISC", Const, 0}, + {"RTM_NEWROUTE", Const, 0}, + {"RTM_NEWRULE", Const, 0}, + {"RTM_NEWTCLASS", Const, 0}, + {"RTM_NEWTFILTER", Const, 0}, + {"RTM_NR_FAMILIES", Const, 0}, + {"RTM_NR_MSGTYPES", Const, 0}, + {"RTM_OIFINFO", Const, 1}, + {"RTM_OLDADD", Const, 0}, + {"RTM_OLDDEL", Const, 0}, + {"RTM_OOIFINFO", Const, 1}, + {"RTM_REDIRECT", Const, 0}, + {"RTM_RESOLVE", Const, 0}, + {"RTM_RTTUNIT", Const, 0}, + {"RTM_SETDCB", Const, 0}, + {"RTM_SETGATE", Const, 1}, + {"RTM_SETLINK", Const, 0}, + {"RTM_SETNEIGHTBL", Const, 0}, + {"RTM_VERSION", Const, 0}, + {"RTNH_ALIGNTO", Const, 0}, + {"RTNH_F_DEAD", Const, 0}, + {"RTNH_F_ONLINK", Const, 0}, + {"RTNH_F_PERVASIVE", Const, 0}, + {"RTNLGRP_IPV4_IFADDR", Const, 1}, + {"RTNLGRP_IPV4_MROUTE", Const, 1}, + {"RTNLGRP_IPV4_ROUTE", Const, 1}, + {"RTNLGRP_IPV4_RULE", Const, 1}, + {"RTNLGRP_IPV6_IFADDR", Const, 1}, + {"RTNLGRP_IPV6_IFINFO", Const, 1}, + {"RTNLGRP_IPV6_MROUTE", Const, 1}, + {"RTNLGRP_IPV6_PREFIX", Const, 1}, + {"RTNLGRP_IPV6_ROUTE", Const, 1}, + {"RTNLGRP_IPV6_RULE", Const, 1}, + {"RTNLGRP_LINK", Const, 1}, + {"RTNLGRP_ND_USEROPT", Const, 1}, + {"RTNLGRP_NEIGH", Const, 1}, + {"RTNLGRP_NONE", Const, 1}, + {"RTNLGRP_NOTIFY", Const, 1}, + {"RTNLGRP_TC", Const, 1}, + {"RTN_ANYCAST", Const, 0}, + {"RTN_BLACKHOLE", Const, 0}, + {"RTN_BROADCAST", Const, 0}, + {"RTN_LOCAL", Const, 0}, + {"RTN_MAX", Const, 0}, + {"RTN_MULTICAST", Const, 0}, + {"RTN_NAT", Const, 0}, + {"RTN_PROHIBIT", Const, 0}, + {"RTN_THROW", Const, 0}, + {"RTN_UNICAST", Const, 0}, + {"RTN_UNREACHABLE", Const, 0}, + {"RTN_UNSPEC", Const, 0}, + {"RTN_XRESOLVE", Const, 0}, + {"RTPROT_BIRD", Const, 0}, + {"RTPROT_BOOT", Const, 0}, + {"RTPROT_DHCP", Const, 0}, + {"RTPROT_DNROUTED", Const, 0}, + {"RTPROT_GATED", Const, 0}, + {"RTPROT_KERNEL", Const, 0}, + {"RTPROT_MRT", Const, 0}, + {"RTPROT_NTK", Const, 0}, + {"RTPROT_RA", Const, 0}, + {"RTPROT_REDIRECT", Const, 0}, + {"RTPROT_STATIC", Const, 0}, + {"RTPROT_UNSPEC", Const, 0}, + {"RTPROT_XORP", Const, 0}, + {"RTPROT_ZEBRA", Const, 0}, + {"RTV_EXPIRE", Const, 0}, + {"RTV_HOPCOUNT", Const, 0}, + {"RTV_MTU", Const, 0}, + {"RTV_RPIPE", Const, 0}, + {"RTV_RTT", Const, 0}, + {"RTV_RTTVAR", Const, 0}, + {"RTV_SPIPE", Const, 0}, + {"RTV_SSTHRESH", Const, 0}, + {"RTV_WEIGHT", Const, 0}, + {"RT_CACHING_CONTEXT", Const, 1}, + {"RT_CLASS_DEFAULT", Const, 0}, + {"RT_CLASS_LOCAL", Const, 0}, + {"RT_CLASS_MAIN", Const, 0}, + {"RT_CLASS_MAX", Const, 0}, + {"RT_CLASS_UNSPEC", Const, 0}, + {"RT_DEFAULT_FIB", Const, 1}, + {"RT_NORTREF", Const, 1}, + {"RT_SCOPE_HOST", Const, 0}, + {"RT_SCOPE_LINK", Const, 0}, + {"RT_SCOPE_NOWHERE", Const, 0}, + {"RT_SCOPE_SITE", Const, 0}, + {"RT_SCOPE_UNIVERSE", Const, 0}, + {"RT_TABLEID_MAX", Const, 1}, + {"RT_TABLE_COMPAT", Const, 0}, + {"RT_TABLE_DEFAULT", Const, 0}, + {"RT_TABLE_LOCAL", Const, 0}, + {"RT_TABLE_MAIN", Const, 0}, + {"RT_TABLE_MAX", Const, 0}, + {"RT_TABLE_UNSPEC", Const, 
0}, + {"RUSAGE_CHILDREN", Const, 0}, + {"RUSAGE_SELF", Const, 0}, + {"RUSAGE_THREAD", Const, 0}, + {"Radvisory_t", Type, 0}, + {"Radvisory_t.Count", Field, 0}, + {"Radvisory_t.Offset", Field, 0}, + {"Radvisory_t.Pad_cgo_0", Field, 0}, + {"RawConn", Type, 9}, + {"RawSockaddr", Type, 0}, + {"RawSockaddr.Data", Field, 0}, + {"RawSockaddr.Family", Field, 0}, + {"RawSockaddr.Len", Field, 0}, + {"RawSockaddrAny", Type, 0}, + {"RawSockaddrAny.Addr", Field, 0}, + {"RawSockaddrAny.Pad", Field, 0}, + {"RawSockaddrDatalink", Type, 0}, + {"RawSockaddrDatalink.Alen", Field, 0}, + {"RawSockaddrDatalink.Data", Field, 0}, + {"RawSockaddrDatalink.Family", Field, 0}, + {"RawSockaddrDatalink.Index", Field, 0}, + {"RawSockaddrDatalink.Len", Field, 0}, + {"RawSockaddrDatalink.Nlen", Field, 0}, + {"RawSockaddrDatalink.Pad_cgo_0", Field, 2}, + {"RawSockaddrDatalink.Slen", Field, 0}, + {"RawSockaddrDatalink.Type", Field, 0}, + {"RawSockaddrInet4", Type, 0}, + {"RawSockaddrInet4.Addr", Field, 0}, + {"RawSockaddrInet4.Family", Field, 0}, + {"RawSockaddrInet4.Len", Field, 0}, + {"RawSockaddrInet4.Port", Field, 0}, + {"RawSockaddrInet4.Zero", Field, 0}, + {"RawSockaddrInet6", Type, 0}, + {"RawSockaddrInet6.Addr", Field, 0}, + {"RawSockaddrInet6.Family", Field, 0}, + {"RawSockaddrInet6.Flowinfo", Field, 0}, + {"RawSockaddrInet6.Len", Field, 0}, + {"RawSockaddrInet6.Port", Field, 0}, + {"RawSockaddrInet6.Scope_id", Field, 0}, + {"RawSockaddrLinklayer", Type, 0}, + {"RawSockaddrLinklayer.Addr", Field, 0}, + {"RawSockaddrLinklayer.Family", Field, 0}, + {"RawSockaddrLinklayer.Halen", Field, 0}, + {"RawSockaddrLinklayer.Hatype", Field, 0}, + {"RawSockaddrLinklayer.Ifindex", Field, 0}, + {"RawSockaddrLinklayer.Pkttype", Field, 0}, + {"RawSockaddrLinklayer.Protocol", Field, 0}, + {"RawSockaddrNetlink", Type, 0}, + {"RawSockaddrNetlink.Family", Field, 0}, + {"RawSockaddrNetlink.Groups", Field, 0}, + {"RawSockaddrNetlink.Pad", Field, 0}, + {"RawSockaddrNetlink.Pid", Field, 0}, + {"RawSockaddrUnix", Type, 0}, + {"RawSockaddrUnix.Family", Field, 0}, + {"RawSockaddrUnix.Len", Field, 0}, + {"RawSockaddrUnix.Pad_cgo_0", Field, 2}, + {"RawSockaddrUnix.Path", Field, 0}, + {"RawSyscall", Func, 0}, + {"RawSyscall6", Func, 0}, + {"Read", Func, 0}, + {"ReadConsole", Func, 1}, + {"ReadDirectoryChanges", Func, 0}, + {"ReadDirent", Func, 0}, + {"ReadFile", Func, 0}, + {"Readlink", Func, 0}, + {"Reboot", Func, 0}, + {"Recvfrom", Func, 0}, + {"Recvmsg", Func, 0}, + {"RegCloseKey", Func, 0}, + {"RegEnumKeyEx", Func, 0}, + {"RegOpenKeyEx", Func, 0}, + {"RegQueryInfoKey", Func, 0}, + {"RegQueryValueEx", Func, 0}, + {"RemoveDirectory", Func, 0}, + {"Removexattr", Func, 1}, + {"Rename", Func, 0}, + {"Renameat", Func, 0}, + {"Revoke", Func, 0}, + {"Rlimit", Type, 0}, + {"Rlimit.Cur", Field, 0}, + {"Rlimit.Max", Field, 0}, + {"Rmdir", Func, 0}, + {"RouteMessage", Type, 0}, + {"RouteMessage.Data", Field, 0}, + {"RouteMessage.Header", Field, 0}, + {"RouteRIB", Func, 0}, + {"RoutingMessage", Type, 0}, + {"RtAttr", Type, 0}, + {"RtAttr.Len", Field, 0}, + {"RtAttr.Type", Field, 0}, + {"RtGenmsg", Type, 0}, + {"RtGenmsg.Family", Field, 0}, + {"RtMetrics", Type, 0}, + {"RtMetrics.Expire", Field, 0}, + {"RtMetrics.Filler", Field, 0}, + {"RtMetrics.Hopcount", Field, 0}, + {"RtMetrics.Locks", Field, 0}, + {"RtMetrics.Mtu", Field, 0}, + {"RtMetrics.Pad", Field, 3}, + {"RtMetrics.Pksent", Field, 0}, + {"RtMetrics.Recvpipe", Field, 0}, + {"RtMetrics.Refcnt", Field, 2}, + {"RtMetrics.Rtt", Field, 0}, + {"RtMetrics.Rttvar", Field, 0}, + {"RtMetrics.Sendpipe", 
Field, 0}, + {"RtMetrics.Ssthresh", Field, 0}, + {"RtMetrics.Weight", Field, 0}, + {"RtMsg", Type, 0}, + {"RtMsg.Dst_len", Field, 0}, + {"RtMsg.Family", Field, 0}, + {"RtMsg.Flags", Field, 0}, + {"RtMsg.Protocol", Field, 0}, + {"RtMsg.Scope", Field, 0}, + {"RtMsg.Src_len", Field, 0}, + {"RtMsg.Table", Field, 0}, + {"RtMsg.Tos", Field, 0}, + {"RtMsg.Type", Field, 0}, + {"RtMsghdr", Type, 0}, + {"RtMsghdr.Addrs", Field, 0}, + {"RtMsghdr.Errno", Field, 0}, + {"RtMsghdr.Flags", Field, 0}, + {"RtMsghdr.Fmask", Field, 0}, + {"RtMsghdr.Hdrlen", Field, 2}, + {"RtMsghdr.Index", Field, 0}, + {"RtMsghdr.Inits", Field, 0}, + {"RtMsghdr.Mpls", Field, 2}, + {"RtMsghdr.Msglen", Field, 0}, + {"RtMsghdr.Pad_cgo_0", Field, 0}, + {"RtMsghdr.Pad_cgo_1", Field, 2}, + {"RtMsghdr.Pid", Field, 0}, + {"RtMsghdr.Priority", Field, 2}, + {"RtMsghdr.Rmx", Field, 0}, + {"RtMsghdr.Seq", Field, 0}, + {"RtMsghdr.Tableid", Field, 2}, + {"RtMsghdr.Type", Field, 0}, + {"RtMsghdr.Use", Field, 0}, + {"RtMsghdr.Version", Field, 0}, + {"RtNexthop", Type, 0}, + {"RtNexthop.Flags", Field, 0}, + {"RtNexthop.Hops", Field, 0}, + {"RtNexthop.Ifindex", Field, 0}, + {"RtNexthop.Len", Field, 0}, + {"Rusage", Type, 0}, + {"Rusage.CreationTime", Field, 0}, + {"Rusage.ExitTime", Field, 0}, + {"Rusage.Idrss", Field, 0}, + {"Rusage.Inblock", Field, 0}, + {"Rusage.Isrss", Field, 0}, + {"Rusage.Ixrss", Field, 0}, + {"Rusage.KernelTime", Field, 0}, + {"Rusage.Majflt", Field, 0}, + {"Rusage.Maxrss", Field, 0}, + {"Rusage.Minflt", Field, 0}, + {"Rusage.Msgrcv", Field, 0}, + {"Rusage.Msgsnd", Field, 0}, + {"Rusage.Nivcsw", Field, 0}, + {"Rusage.Nsignals", Field, 0}, + {"Rusage.Nswap", Field, 0}, + {"Rusage.Nvcsw", Field, 0}, + {"Rusage.Oublock", Field, 0}, + {"Rusage.Stime", Field, 0}, + {"Rusage.UserTime", Field, 0}, + {"Rusage.Utime", Field, 0}, + {"SCM_BINTIME", Const, 0}, + {"SCM_CREDENTIALS", Const, 0}, + {"SCM_CREDS", Const, 0}, + {"SCM_RIGHTS", Const, 0}, + {"SCM_TIMESTAMP", Const, 0}, + {"SCM_TIMESTAMPING", Const, 0}, + {"SCM_TIMESTAMPNS", Const, 0}, + {"SCM_TIMESTAMP_MONOTONIC", Const, 0}, + {"SHUT_RD", Const, 0}, + {"SHUT_RDWR", Const, 0}, + {"SHUT_WR", Const, 0}, + {"SID", Type, 0}, + {"SIDAndAttributes", Type, 0}, + {"SIDAndAttributes.Attributes", Field, 0}, + {"SIDAndAttributes.Sid", Field, 0}, + {"SIGABRT", Const, 0}, + {"SIGALRM", Const, 0}, + {"SIGBUS", Const, 0}, + {"SIGCHLD", Const, 0}, + {"SIGCLD", Const, 0}, + {"SIGCONT", Const, 0}, + {"SIGEMT", Const, 0}, + {"SIGFPE", Const, 0}, + {"SIGHUP", Const, 0}, + {"SIGILL", Const, 0}, + {"SIGINFO", Const, 0}, + {"SIGINT", Const, 0}, + {"SIGIO", Const, 0}, + {"SIGIOT", Const, 0}, + {"SIGKILL", Const, 0}, + {"SIGLIBRT", Const, 1}, + {"SIGLWP", Const, 0}, + {"SIGPIPE", Const, 0}, + {"SIGPOLL", Const, 0}, + {"SIGPROF", Const, 0}, + {"SIGPWR", Const, 0}, + {"SIGQUIT", Const, 0}, + {"SIGSEGV", Const, 0}, + {"SIGSTKFLT", Const, 0}, + {"SIGSTOP", Const, 0}, + {"SIGSYS", Const, 0}, + {"SIGTERM", Const, 0}, + {"SIGTHR", Const, 0}, + {"SIGTRAP", Const, 0}, + {"SIGTSTP", Const, 0}, + {"SIGTTIN", Const, 0}, + {"SIGTTOU", Const, 0}, + {"SIGUNUSED", Const, 0}, + {"SIGURG", Const, 0}, + {"SIGUSR1", Const, 0}, + {"SIGUSR2", Const, 0}, + {"SIGVTALRM", Const, 0}, + {"SIGWINCH", Const, 0}, + {"SIGXCPU", Const, 0}, + {"SIGXFSZ", Const, 0}, + {"SIOCADDDLCI", Const, 0}, + {"SIOCADDMULTI", Const, 0}, + {"SIOCADDRT", Const, 0}, + {"SIOCAIFADDR", Const, 0}, + {"SIOCAIFGROUP", Const, 0}, + {"SIOCALIFADDR", Const, 0}, + {"SIOCARPIPLL", Const, 0}, + {"SIOCATMARK", Const, 0}, + {"SIOCAUTOADDR", Const, 0}, + 
{"SIOCAUTONETMASK", Const, 0}, + {"SIOCBRDGADD", Const, 1}, + {"SIOCBRDGADDS", Const, 1}, + {"SIOCBRDGARL", Const, 1}, + {"SIOCBRDGDADDR", Const, 1}, + {"SIOCBRDGDEL", Const, 1}, + {"SIOCBRDGDELS", Const, 1}, + {"SIOCBRDGFLUSH", Const, 1}, + {"SIOCBRDGFRL", Const, 1}, + {"SIOCBRDGGCACHE", Const, 1}, + {"SIOCBRDGGFD", Const, 1}, + {"SIOCBRDGGHT", Const, 1}, + {"SIOCBRDGGIFFLGS", Const, 1}, + {"SIOCBRDGGMA", Const, 1}, + {"SIOCBRDGGPARAM", Const, 1}, + {"SIOCBRDGGPRI", Const, 1}, + {"SIOCBRDGGRL", Const, 1}, + {"SIOCBRDGGSIFS", Const, 1}, + {"SIOCBRDGGTO", Const, 1}, + {"SIOCBRDGIFS", Const, 1}, + {"SIOCBRDGRTS", Const, 1}, + {"SIOCBRDGSADDR", Const, 1}, + {"SIOCBRDGSCACHE", Const, 1}, + {"SIOCBRDGSFD", Const, 1}, + {"SIOCBRDGSHT", Const, 1}, + {"SIOCBRDGSIFCOST", Const, 1}, + {"SIOCBRDGSIFFLGS", Const, 1}, + {"SIOCBRDGSIFPRIO", Const, 1}, + {"SIOCBRDGSMA", Const, 1}, + {"SIOCBRDGSPRI", Const, 1}, + {"SIOCBRDGSPROTO", Const, 1}, + {"SIOCBRDGSTO", Const, 1}, + {"SIOCBRDGSTXHC", Const, 1}, + {"SIOCDARP", Const, 0}, + {"SIOCDELDLCI", Const, 0}, + {"SIOCDELMULTI", Const, 0}, + {"SIOCDELRT", Const, 0}, + {"SIOCDEVPRIVATE", Const, 0}, + {"SIOCDIFADDR", Const, 0}, + {"SIOCDIFGROUP", Const, 0}, + {"SIOCDIFPHYADDR", Const, 0}, + {"SIOCDLIFADDR", Const, 0}, + {"SIOCDRARP", Const, 0}, + {"SIOCGARP", Const, 0}, + {"SIOCGDRVSPEC", Const, 0}, + {"SIOCGETKALIVE", Const, 1}, + {"SIOCGETLABEL", Const, 1}, + {"SIOCGETPFLOW", Const, 1}, + {"SIOCGETPFSYNC", Const, 1}, + {"SIOCGETSGCNT", Const, 0}, + {"SIOCGETVIFCNT", Const, 0}, + {"SIOCGETVLAN", Const, 0}, + {"SIOCGHIWAT", Const, 0}, + {"SIOCGIFADDR", Const, 0}, + {"SIOCGIFADDRPREF", Const, 1}, + {"SIOCGIFALIAS", Const, 1}, + {"SIOCGIFALTMTU", Const, 0}, + {"SIOCGIFASYNCMAP", Const, 0}, + {"SIOCGIFBOND", Const, 0}, + {"SIOCGIFBR", Const, 0}, + {"SIOCGIFBRDADDR", Const, 0}, + {"SIOCGIFCAP", Const, 0}, + {"SIOCGIFCONF", Const, 0}, + {"SIOCGIFCOUNT", Const, 0}, + {"SIOCGIFDATA", Const, 1}, + {"SIOCGIFDESCR", Const, 0}, + {"SIOCGIFDEVMTU", Const, 0}, + {"SIOCGIFDLT", Const, 1}, + {"SIOCGIFDSTADDR", Const, 0}, + {"SIOCGIFENCAP", Const, 0}, + {"SIOCGIFFIB", Const, 1}, + {"SIOCGIFFLAGS", Const, 0}, + {"SIOCGIFGATTR", Const, 1}, + {"SIOCGIFGENERIC", Const, 0}, + {"SIOCGIFGMEMB", Const, 0}, + {"SIOCGIFGROUP", Const, 0}, + {"SIOCGIFHARDMTU", Const, 3}, + {"SIOCGIFHWADDR", Const, 0}, + {"SIOCGIFINDEX", Const, 0}, + {"SIOCGIFKPI", Const, 0}, + {"SIOCGIFMAC", Const, 0}, + {"SIOCGIFMAP", Const, 0}, + {"SIOCGIFMEDIA", Const, 0}, + {"SIOCGIFMEM", Const, 0}, + {"SIOCGIFMETRIC", Const, 0}, + {"SIOCGIFMTU", Const, 0}, + {"SIOCGIFNAME", Const, 0}, + {"SIOCGIFNETMASK", Const, 0}, + {"SIOCGIFPDSTADDR", Const, 0}, + {"SIOCGIFPFLAGS", Const, 0}, + {"SIOCGIFPHYS", Const, 0}, + {"SIOCGIFPRIORITY", Const, 1}, + {"SIOCGIFPSRCADDR", Const, 0}, + {"SIOCGIFRDOMAIN", Const, 1}, + {"SIOCGIFRTLABEL", Const, 1}, + {"SIOCGIFSLAVE", Const, 0}, + {"SIOCGIFSTATUS", Const, 0}, + {"SIOCGIFTIMESLOT", Const, 1}, + {"SIOCGIFTXQLEN", Const, 0}, + {"SIOCGIFVLAN", Const, 0}, + {"SIOCGIFWAKEFLAGS", Const, 0}, + {"SIOCGIFXFLAGS", Const, 1}, + {"SIOCGLIFADDR", Const, 0}, + {"SIOCGLIFPHYADDR", Const, 0}, + {"SIOCGLIFPHYRTABLE", Const, 1}, + {"SIOCGLIFPHYTTL", Const, 3}, + {"SIOCGLINKSTR", Const, 1}, + {"SIOCGLOWAT", Const, 0}, + {"SIOCGPGRP", Const, 0}, + {"SIOCGPRIVATE_0", Const, 0}, + {"SIOCGPRIVATE_1", Const, 0}, + {"SIOCGRARP", Const, 0}, + {"SIOCGSPPPPARAMS", Const, 3}, + {"SIOCGSTAMP", Const, 0}, + {"SIOCGSTAMPNS", Const, 0}, + {"SIOCGVH", Const, 1}, + {"SIOCGVNETID", Const, 3}, + {"SIOCIFCREATE", Const, 
0}, + {"SIOCIFCREATE2", Const, 0}, + {"SIOCIFDESTROY", Const, 0}, + {"SIOCIFGCLONERS", Const, 0}, + {"SIOCINITIFADDR", Const, 1}, + {"SIOCPROTOPRIVATE", Const, 0}, + {"SIOCRSLVMULTI", Const, 0}, + {"SIOCRTMSG", Const, 0}, + {"SIOCSARP", Const, 0}, + {"SIOCSDRVSPEC", Const, 0}, + {"SIOCSETKALIVE", Const, 1}, + {"SIOCSETLABEL", Const, 1}, + {"SIOCSETPFLOW", Const, 1}, + {"SIOCSETPFSYNC", Const, 1}, + {"SIOCSETVLAN", Const, 0}, + {"SIOCSHIWAT", Const, 0}, + {"SIOCSIFADDR", Const, 0}, + {"SIOCSIFADDRPREF", Const, 1}, + {"SIOCSIFALTMTU", Const, 0}, + {"SIOCSIFASYNCMAP", Const, 0}, + {"SIOCSIFBOND", Const, 0}, + {"SIOCSIFBR", Const, 0}, + {"SIOCSIFBRDADDR", Const, 0}, + {"SIOCSIFCAP", Const, 0}, + {"SIOCSIFDESCR", Const, 0}, + {"SIOCSIFDSTADDR", Const, 0}, + {"SIOCSIFENCAP", Const, 0}, + {"SIOCSIFFIB", Const, 1}, + {"SIOCSIFFLAGS", Const, 0}, + {"SIOCSIFGATTR", Const, 1}, + {"SIOCSIFGENERIC", Const, 0}, + {"SIOCSIFHWADDR", Const, 0}, + {"SIOCSIFHWBROADCAST", Const, 0}, + {"SIOCSIFKPI", Const, 0}, + {"SIOCSIFLINK", Const, 0}, + {"SIOCSIFLLADDR", Const, 0}, + {"SIOCSIFMAC", Const, 0}, + {"SIOCSIFMAP", Const, 0}, + {"SIOCSIFMEDIA", Const, 0}, + {"SIOCSIFMEM", Const, 0}, + {"SIOCSIFMETRIC", Const, 0}, + {"SIOCSIFMTU", Const, 0}, + {"SIOCSIFNAME", Const, 0}, + {"SIOCSIFNETMASK", Const, 0}, + {"SIOCSIFPFLAGS", Const, 0}, + {"SIOCSIFPHYADDR", Const, 0}, + {"SIOCSIFPHYS", Const, 0}, + {"SIOCSIFPRIORITY", Const, 1}, + {"SIOCSIFRDOMAIN", Const, 1}, + {"SIOCSIFRTLABEL", Const, 1}, + {"SIOCSIFRVNET", Const, 0}, + {"SIOCSIFSLAVE", Const, 0}, + {"SIOCSIFTIMESLOT", Const, 1}, + {"SIOCSIFTXQLEN", Const, 0}, + {"SIOCSIFVLAN", Const, 0}, + {"SIOCSIFVNET", Const, 0}, + {"SIOCSIFXFLAGS", Const, 1}, + {"SIOCSLIFPHYADDR", Const, 0}, + {"SIOCSLIFPHYRTABLE", Const, 1}, + {"SIOCSLIFPHYTTL", Const, 3}, + {"SIOCSLINKSTR", Const, 1}, + {"SIOCSLOWAT", Const, 0}, + {"SIOCSPGRP", Const, 0}, + {"SIOCSRARP", Const, 0}, + {"SIOCSSPPPPARAMS", Const, 3}, + {"SIOCSVH", Const, 1}, + {"SIOCSVNETID", Const, 3}, + {"SIOCZIFDATA", Const, 1}, + {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1}, + {"SIO_GET_INTERFACE_LIST", Const, 0}, + {"SIO_KEEPALIVE_VALS", Const, 3}, + {"SIO_UDP_CONNRESET", Const, 4}, + {"SOCK_CLOEXEC", Const, 0}, + {"SOCK_DCCP", Const, 0}, + {"SOCK_DGRAM", Const, 0}, + {"SOCK_FLAGS_MASK", Const, 1}, + {"SOCK_MAXADDRLEN", Const, 0}, + {"SOCK_NONBLOCK", Const, 0}, + {"SOCK_NOSIGPIPE", Const, 1}, + {"SOCK_PACKET", Const, 0}, + {"SOCK_RAW", Const, 0}, + {"SOCK_RDM", Const, 0}, + {"SOCK_SEQPACKET", Const, 0}, + {"SOCK_STREAM", Const, 0}, + {"SOL_AAL", Const, 0}, + {"SOL_ATM", Const, 0}, + {"SOL_DECNET", Const, 0}, + {"SOL_ICMPV6", Const, 0}, + {"SOL_IP", Const, 0}, + {"SOL_IPV6", Const, 0}, + {"SOL_IRDA", Const, 0}, + {"SOL_PACKET", Const, 0}, + {"SOL_RAW", Const, 0}, + {"SOL_SOCKET", Const, 0}, + {"SOL_TCP", Const, 0}, + {"SOL_X25", Const, 0}, + {"SOMAXCONN", Const, 0}, + {"SO_ACCEPTCONN", Const, 0}, + {"SO_ACCEPTFILTER", Const, 0}, + {"SO_ATTACH_FILTER", Const, 0}, + {"SO_BINDANY", Const, 1}, + {"SO_BINDTODEVICE", Const, 0}, + {"SO_BINTIME", Const, 0}, + {"SO_BROADCAST", Const, 0}, + {"SO_BSDCOMPAT", Const, 0}, + {"SO_DEBUG", Const, 0}, + {"SO_DETACH_FILTER", Const, 0}, + {"SO_DOMAIN", Const, 0}, + {"SO_DONTROUTE", Const, 0}, + {"SO_DONTTRUNC", Const, 0}, + {"SO_ERROR", Const, 0}, + {"SO_KEEPALIVE", Const, 0}, + {"SO_LABEL", Const, 0}, + {"SO_LINGER", Const, 0}, + {"SO_LINGER_SEC", Const, 0}, + {"SO_LISTENINCQLEN", Const, 0}, + {"SO_LISTENQLEN", Const, 0}, + {"SO_LISTENQLIMIT", Const, 0}, + {"SO_MARK", Const, 0}, + 
{"SO_NETPROC", Const, 1}, + {"SO_NKE", Const, 0}, + {"SO_NOADDRERR", Const, 0}, + {"SO_NOHEADER", Const, 1}, + {"SO_NOSIGPIPE", Const, 0}, + {"SO_NOTIFYCONFLICT", Const, 0}, + {"SO_NO_CHECK", Const, 0}, + {"SO_NO_DDP", Const, 0}, + {"SO_NO_OFFLOAD", Const, 0}, + {"SO_NP_EXTENSIONS", Const, 0}, + {"SO_NREAD", Const, 0}, + {"SO_NUMRCVPKT", Const, 16}, + {"SO_NWRITE", Const, 0}, + {"SO_OOBINLINE", Const, 0}, + {"SO_OVERFLOWED", Const, 1}, + {"SO_PASSCRED", Const, 0}, + {"SO_PASSSEC", Const, 0}, + {"SO_PEERCRED", Const, 0}, + {"SO_PEERLABEL", Const, 0}, + {"SO_PEERNAME", Const, 0}, + {"SO_PEERSEC", Const, 0}, + {"SO_PRIORITY", Const, 0}, + {"SO_PROTOCOL", Const, 0}, + {"SO_PROTOTYPE", Const, 1}, + {"SO_RANDOMPORT", Const, 0}, + {"SO_RCVBUF", Const, 0}, + {"SO_RCVBUFFORCE", Const, 0}, + {"SO_RCVLOWAT", Const, 0}, + {"SO_RCVTIMEO", Const, 0}, + {"SO_RESTRICTIONS", Const, 0}, + {"SO_RESTRICT_DENYIN", Const, 0}, + {"SO_RESTRICT_DENYOUT", Const, 0}, + {"SO_RESTRICT_DENYSET", Const, 0}, + {"SO_REUSEADDR", Const, 0}, + {"SO_REUSEPORT", Const, 0}, + {"SO_REUSESHAREUID", Const, 0}, + {"SO_RTABLE", Const, 1}, + {"SO_RXQ_OVFL", Const, 0}, + {"SO_SECURITY_AUTHENTICATION", Const, 0}, + {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0}, + {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0}, + {"SO_SETFIB", Const, 0}, + {"SO_SNDBUF", Const, 0}, + {"SO_SNDBUFFORCE", Const, 0}, + {"SO_SNDLOWAT", Const, 0}, + {"SO_SNDTIMEO", Const, 0}, + {"SO_SPLICE", Const, 1}, + {"SO_TIMESTAMP", Const, 0}, + {"SO_TIMESTAMPING", Const, 0}, + {"SO_TIMESTAMPNS", Const, 0}, + {"SO_TIMESTAMP_MONOTONIC", Const, 0}, + {"SO_TYPE", Const, 0}, + {"SO_UPCALLCLOSEWAIT", Const, 0}, + {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0}, + {"SO_UPDATE_CONNECT_CONTEXT", Const, 1}, + {"SO_USELOOPBACK", Const, 0}, + {"SO_USER_COOKIE", Const, 1}, + {"SO_VENDOR", Const, 3}, + {"SO_WANTMORE", Const, 0}, + {"SO_WANTOOBFLAG", Const, 0}, + {"SSLExtraCertChainPolicyPara", Type, 0}, + {"SSLExtraCertChainPolicyPara.AuthType", Field, 0}, + {"SSLExtraCertChainPolicyPara.Checks", Field, 0}, + {"SSLExtraCertChainPolicyPara.ServerName", Field, 0}, + {"SSLExtraCertChainPolicyPara.Size", Field, 0}, + {"STANDARD_RIGHTS_ALL", Const, 0}, + {"STANDARD_RIGHTS_EXECUTE", Const, 0}, + {"STANDARD_RIGHTS_READ", Const, 0}, + {"STANDARD_RIGHTS_REQUIRED", Const, 0}, + {"STANDARD_RIGHTS_WRITE", Const, 0}, + {"STARTF_USESHOWWINDOW", Const, 0}, + {"STARTF_USESTDHANDLES", Const, 0}, + {"STD_ERROR_HANDLE", Const, 0}, + {"STD_INPUT_HANDLE", Const, 0}, + {"STD_OUTPUT_HANDLE", Const, 0}, + {"SUBLANG_ENGLISH_US", Const, 0}, + {"SW_FORCEMINIMIZE", Const, 0}, + {"SW_HIDE", Const, 0}, + {"SW_MAXIMIZE", Const, 0}, + {"SW_MINIMIZE", Const, 0}, + {"SW_NORMAL", Const, 0}, + {"SW_RESTORE", Const, 0}, + {"SW_SHOW", Const, 0}, + {"SW_SHOWDEFAULT", Const, 0}, + {"SW_SHOWMAXIMIZED", Const, 0}, + {"SW_SHOWMINIMIZED", Const, 0}, + {"SW_SHOWMINNOACTIVE", Const, 0}, + {"SW_SHOWNA", Const, 0}, + {"SW_SHOWNOACTIVATE", Const, 0}, + {"SW_SHOWNORMAL", Const, 0}, + {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4}, + {"SYNCHRONIZE", Const, 0}, + {"SYSCTL_VERSION", Const, 1}, + {"SYSCTL_VERS_0", Const, 1}, + {"SYSCTL_VERS_1", Const, 1}, + {"SYSCTL_VERS_MASK", Const, 1}, + {"SYS_ABORT2", Const, 0}, + {"SYS_ACCEPT", Const, 0}, + {"SYS_ACCEPT4", Const, 0}, + {"SYS_ACCEPT_NOCANCEL", Const, 0}, + {"SYS_ACCESS", Const, 0}, + {"SYS_ACCESS_EXTENDED", Const, 0}, + {"SYS_ACCT", Const, 0}, + {"SYS_ADD_KEY", Const, 0}, + {"SYS_ADD_PROFIL", Const, 0}, + {"SYS_ADJFREQ", Const, 1}, + {"SYS_ADJTIME", Const, 0}, + {"SYS_ADJTIMEX", Const, 0}, + 
{"SYS_AFS_SYSCALL", Const, 0}, + {"SYS_AIO_CANCEL", Const, 0}, + {"SYS_AIO_ERROR", Const, 0}, + {"SYS_AIO_FSYNC", Const, 0}, + {"SYS_AIO_MLOCK", Const, 14}, + {"SYS_AIO_READ", Const, 0}, + {"SYS_AIO_RETURN", Const, 0}, + {"SYS_AIO_SUSPEND", Const, 0}, + {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0}, + {"SYS_AIO_WAITCOMPLETE", Const, 14}, + {"SYS_AIO_WRITE", Const, 0}, + {"SYS_ALARM", Const, 0}, + {"SYS_ARCH_PRCTL", Const, 0}, + {"SYS_ARM_FADVISE64_64", Const, 0}, + {"SYS_ARM_SYNC_FILE_RANGE", Const, 0}, + {"SYS_ATGETMSG", Const, 0}, + {"SYS_ATPGETREQ", Const, 0}, + {"SYS_ATPGETRSP", Const, 0}, + {"SYS_ATPSNDREQ", Const, 0}, + {"SYS_ATPSNDRSP", Const, 0}, + {"SYS_ATPUTMSG", Const, 0}, + {"SYS_ATSOCKET", Const, 0}, + {"SYS_AUDIT", Const, 0}, + {"SYS_AUDITCTL", Const, 0}, + {"SYS_AUDITON", Const, 0}, + {"SYS_AUDIT_SESSION_JOIN", Const, 0}, + {"SYS_AUDIT_SESSION_PORT", Const, 0}, + {"SYS_AUDIT_SESSION_SELF", Const, 0}, + {"SYS_BDFLUSH", Const, 0}, + {"SYS_BIND", Const, 0}, + {"SYS_BINDAT", Const, 3}, + {"SYS_BREAK", Const, 0}, + {"SYS_BRK", Const, 0}, + {"SYS_BSDTHREAD_CREATE", Const, 0}, + {"SYS_BSDTHREAD_REGISTER", Const, 0}, + {"SYS_BSDTHREAD_TERMINATE", Const, 0}, + {"SYS_CAPGET", Const, 0}, + {"SYS_CAPSET", Const, 0}, + {"SYS_CAP_ENTER", Const, 0}, + {"SYS_CAP_FCNTLS_GET", Const, 1}, + {"SYS_CAP_FCNTLS_LIMIT", Const, 1}, + {"SYS_CAP_GETMODE", Const, 0}, + {"SYS_CAP_GETRIGHTS", Const, 0}, + {"SYS_CAP_IOCTLS_GET", Const, 1}, + {"SYS_CAP_IOCTLS_LIMIT", Const, 1}, + {"SYS_CAP_NEW", Const, 0}, + {"SYS_CAP_RIGHTS_GET", Const, 1}, + {"SYS_CAP_RIGHTS_LIMIT", Const, 1}, + {"SYS_CHDIR", Const, 0}, + {"SYS_CHFLAGS", Const, 0}, + {"SYS_CHFLAGSAT", Const, 3}, + {"SYS_CHMOD", Const, 0}, + {"SYS_CHMOD_EXTENDED", Const, 0}, + {"SYS_CHOWN", Const, 0}, + {"SYS_CHOWN32", Const, 0}, + {"SYS_CHROOT", Const, 0}, + {"SYS_CHUD", Const, 0}, + {"SYS_CLOCK_ADJTIME", Const, 0}, + {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1}, + {"SYS_CLOCK_GETRES", Const, 0}, + {"SYS_CLOCK_GETTIME", Const, 0}, + {"SYS_CLOCK_NANOSLEEP", Const, 0}, + {"SYS_CLOCK_SETTIME", Const, 0}, + {"SYS_CLONE", Const, 0}, + {"SYS_CLOSE", Const, 0}, + {"SYS_CLOSEFROM", Const, 0}, + {"SYS_CLOSE_NOCANCEL", Const, 0}, + {"SYS_CONNECT", Const, 0}, + {"SYS_CONNECTAT", Const, 3}, + {"SYS_CONNECT_NOCANCEL", Const, 0}, + {"SYS_COPYFILE", Const, 0}, + {"SYS_CPUSET", Const, 0}, + {"SYS_CPUSET_GETAFFINITY", Const, 0}, + {"SYS_CPUSET_GETID", Const, 0}, + {"SYS_CPUSET_SETAFFINITY", Const, 0}, + {"SYS_CPUSET_SETID", Const, 0}, + {"SYS_CREAT", Const, 0}, + {"SYS_CREATE_MODULE", Const, 0}, + {"SYS_CSOPS", Const, 0}, + {"SYS_CSOPS_AUDITTOKEN", Const, 16}, + {"SYS_DELETE", Const, 0}, + {"SYS_DELETE_MODULE", Const, 0}, + {"SYS_DUP", Const, 0}, + {"SYS_DUP2", Const, 0}, + {"SYS_DUP3", Const, 0}, + {"SYS_EACCESS", Const, 0}, + {"SYS_EPOLL_CREATE", Const, 0}, + {"SYS_EPOLL_CREATE1", Const, 0}, + {"SYS_EPOLL_CTL", Const, 0}, + {"SYS_EPOLL_CTL_OLD", Const, 0}, + {"SYS_EPOLL_PWAIT", Const, 0}, + {"SYS_EPOLL_WAIT", Const, 0}, + {"SYS_EPOLL_WAIT_OLD", Const, 0}, + {"SYS_EVENTFD", Const, 0}, + {"SYS_EVENTFD2", Const, 0}, + {"SYS_EXCHANGEDATA", Const, 0}, + {"SYS_EXECVE", Const, 0}, + {"SYS_EXIT", Const, 0}, + {"SYS_EXIT_GROUP", Const, 0}, + {"SYS_EXTATTRCTL", Const, 0}, + {"SYS_EXTATTR_DELETE_FD", Const, 0}, + {"SYS_EXTATTR_DELETE_FILE", Const, 0}, + {"SYS_EXTATTR_DELETE_LINK", Const, 0}, + {"SYS_EXTATTR_GET_FD", Const, 0}, + {"SYS_EXTATTR_GET_FILE", Const, 0}, + {"SYS_EXTATTR_GET_LINK", Const, 0}, + {"SYS_EXTATTR_LIST_FD", Const, 0}, + {"SYS_EXTATTR_LIST_FILE", Const, 0}, + 
{"SYS_EXTATTR_LIST_LINK", Const, 0}, + {"SYS_EXTATTR_SET_FD", Const, 0}, + {"SYS_EXTATTR_SET_FILE", Const, 0}, + {"SYS_EXTATTR_SET_LINK", Const, 0}, + {"SYS_FACCESSAT", Const, 0}, + {"SYS_FADVISE64", Const, 0}, + {"SYS_FADVISE64_64", Const, 0}, + {"SYS_FALLOCATE", Const, 0}, + {"SYS_FANOTIFY_INIT", Const, 0}, + {"SYS_FANOTIFY_MARK", Const, 0}, + {"SYS_FCHDIR", Const, 0}, + {"SYS_FCHFLAGS", Const, 0}, + {"SYS_FCHMOD", Const, 0}, + {"SYS_FCHMODAT", Const, 0}, + {"SYS_FCHMOD_EXTENDED", Const, 0}, + {"SYS_FCHOWN", Const, 0}, + {"SYS_FCHOWN32", Const, 0}, + {"SYS_FCHOWNAT", Const, 0}, + {"SYS_FCHROOT", Const, 1}, + {"SYS_FCNTL", Const, 0}, + {"SYS_FCNTL64", Const, 0}, + {"SYS_FCNTL_NOCANCEL", Const, 0}, + {"SYS_FDATASYNC", Const, 0}, + {"SYS_FEXECVE", Const, 0}, + {"SYS_FFCLOCK_GETCOUNTER", Const, 0}, + {"SYS_FFCLOCK_GETESTIMATE", Const, 0}, + {"SYS_FFCLOCK_SETESTIMATE", Const, 0}, + {"SYS_FFSCTL", Const, 0}, + {"SYS_FGETATTRLIST", Const, 0}, + {"SYS_FGETXATTR", Const, 0}, + {"SYS_FHOPEN", Const, 0}, + {"SYS_FHSTAT", Const, 0}, + {"SYS_FHSTATFS", Const, 0}, + {"SYS_FILEPORT_MAKEFD", Const, 0}, + {"SYS_FILEPORT_MAKEPORT", Const, 0}, + {"SYS_FKTRACE", Const, 1}, + {"SYS_FLISTXATTR", Const, 0}, + {"SYS_FLOCK", Const, 0}, + {"SYS_FORK", Const, 0}, + {"SYS_FPATHCONF", Const, 0}, + {"SYS_FREEBSD6_FTRUNCATE", Const, 0}, + {"SYS_FREEBSD6_LSEEK", Const, 0}, + {"SYS_FREEBSD6_MMAP", Const, 0}, + {"SYS_FREEBSD6_PREAD", Const, 0}, + {"SYS_FREEBSD6_PWRITE", Const, 0}, + {"SYS_FREEBSD6_TRUNCATE", Const, 0}, + {"SYS_FREMOVEXATTR", Const, 0}, + {"SYS_FSCTL", Const, 0}, + {"SYS_FSETATTRLIST", Const, 0}, + {"SYS_FSETXATTR", Const, 0}, + {"SYS_FSGETPATH", Const, 0}, + {"SYS_FSTAT", Const, 0}, + {"SYS_FSTAT64", Const, 0}, + {"SYS_FSTAT64_EXTENDED", Const, 0}, + {"SYS_FSTATAT", Const, 0}, + {"SYS_FSTATAT64", Const, 0}, + {"SYS_FSTATFS", Const, 0}, + {"SYS_FSTATFS64", Const, 0}, + {"SYS_FSTATV", Const, 0}, + {"SYS_FSTATVFS1", Const, 1}, + {"SYS_FSTAT_EXTENDED", Const, 0}, + {"SYS_FSYNC", Const, 0}, + {"SYS_FSYNC_NOCANCEL", Const, 0}, + {"SYS_FSYNC_RANGE", Const, 1}, + {"SYS_FTIME", Const, 0}, + {"SYS_FTRUNCATE", Const, 0}, + {"SYS_FTRUNCATE64", Const, 0}, + {"SYS_FUTEX", Const, 0}, + {"SYS_FUTIMENS", Const, 1}, + {"SYS_FUTIMES", Const, 0}, + {"SYS_FUTIMESAT", Const, 0}, + {"SYS_GETATTRLIST", Const, 0}, + {"SYS_GETAUDIT", Const, 0}, + {"SYS_GETAUDIT_ADDR", Const, 0}, + {"SYS_GETAUID", Const, 0}, + {"SYS_GETCONTEXT", Const, 0}, + {"SYS_GETCPU", Const, 0}, + {"SYS_GETCWD", Const, 0}, + {"SYS_GETDENTS", Const, 0}, + {"SYS_GETDENTS64", Const, 0}, + {"SYS_GETDIRENTRIES", Const, 0}, + {"SYS_GETDIRENTRIES64", Const, 0}, + {"SYS_GETDIRENTRIESATTR", Const, 0}, + {"SYS_GETDTABLECOUNT", Const, 1}, + {"SYS_GETDTABLESIZE", Const, 0}, + {"SYS_GETEGID", Const, 0}, + {"SYS_GETEGID32", Const, 0}, + {"SYS_GETEUID", Const, 0}, + {"SYS_GETEUID32", Const, 0}, + {"SYS_GETFH", Const, 0}, + {"SYS_GETFSSTAT", Const, 0}, + {"SYS_GETFSSTAT64", Const, 0}, + {"SYS_GETGID", Const, 0}, + {"SYS_GETGID32", Const, 0}, + {"SYS_GETGROUPS", Const, 0}, + {"SYS_GETGROUPS32", Const, 0}, + {"SYS_GETHOSTUUID", Const, 0}, + {"SYS_GETITIMER", Const, 0}, + {"SYS_GETLCID", Const, 0}, + {"SYS_GETLOGIN", Const, 0}, + {"SYS_GETLOGINCLASS", Const, 0}, + {"SYS_GETPEERNAME", Const, 0}, + {"SYS_GETPGID", Const, 0}, + {"SYS_GETPGRP", Const, 0}, + {"SYS_GETPID", Const, 0}, + {"SYS_GETPMSG", Const, 0}, + {"SYS_GETPPID", Const, 0}, + {"SYS_GETPRIORITY", Const, 0}, + {"SYS_GETRESGID", Const, 0}, + {"SYS_GETRESGID32", Const, 0}, + {"SYS_GETRESUID", Const, 0}, + 
{"SYS_GETRESUID32", Const, 0}, + {"SYS_GETRLIMIT", Const, 0}, + {"SYS_GETRTABLE", Const, 1}, + {"SYS_GETRUSAGE", Const, 0}, + {"SYS_GETSGROUPS", Const, 0}, + {"SYS_GETSID", Const, 0}, + {"SYS_GETSOCKNAME", Const, 0}, + {"SYS_GETSOCKOPT", Const, 0}, + {"SYS_GETTHRID", Const, 1}, + {"SYS_GETTID", Const, 0}, + {"SYS_GETTIMEOFDAY", Const, 0}, + {"SYS_GETUID", Const, 0}, + {"SYS_GETUID32", Const, 0}, + {"SYS_GETVFSSTAT", Const, 1}, + {"SYS_GETWGROUPS", Const, 0}, + {"SYS_GETXATTR", Const, 0}, + {"SYS_GET_KERNEL_SYMS", Const, 0}, + {"SYS_GET_MEMPOLICY", Const, 0}, + {"SYS_GET_ROBUST_LIST", Const, 0}, + {"SYS_GET_THREAD_AREA", Const, 0}, + {"SYS_GSSD_SYSCALL", Const, 14}, + {"SYS_GTTY", Const, 0}, + {"SYS_IDENTITYSVC", Const, 0}, + {"SYS_IDLE", Const, 0}, + {"SYS_INITGROUPS", Const, 0}, + {"SYS_INIT_MODULE", Const, 0}, + {"SYS_INOTIFY_ADD_WATCH", Const, 0}, + {"SYS_INOTIFY_INIT", Const, 0}, + {"SYS_INOTIFY_INIT1", Const, 0}, + {"SYS_INOTIFY_RM_WATCH", Const, 0}, + {"SYS_IOCTL", Const, 0}, + {"SYS_IOPERM", Const, 0}, + {"SYS_IOPL", Const, 0}, + {"SYS_IOPOLICYSYS", Const, 0}, + {"SYS_IOPRIO_GET", Const, 0}, + {"SYS_IOPRIO_SET", Const, 0}, + {"SYS_IO_CANCEL", Const, 0}, + {"SYS_IO_DESTROY", Const, 0}, + {"SYS_IO_GETEVENTS", Const, 0}, + {"SYS_IO_SETUP", Const, 0}, + {"SYS_IO_SUBMIT", Const, 0}, + {"SYS_IPC", Const, 0}, + {"SYS_ISSETUGID", Const, 0}, + {"SYS_JAIL", Const, 0}, + {"SYS_JAIL_ATTACH", Const, 0}, + {"SYS_JAIL_GET", Const, 0}, + {"SYS_JAIL_REMOVE", Const, 0}, + {"SYS_JAIL_SET", Const, 0}, + {"SYS_KAS_INFO", Const, 16}, + {"SYS_KDEBUG_TRACE", Const, 0}, + {"SYS_KENV", Const, 0}, + {"SYS_KEVENT", Const, 0}, + {"SYS_KEVENT64", Const, 0}, + {"SYS_KEXEC_LOAD", Const, 0}, + {"SYS_KEYCTL", Const, 0}, + {"SYS_KILL", Const, 0}, + {"SYS_KLDFIND", Const, 0}, + {"SYS_KLDFIRSTMOD", Const, 0}, + {"SYS_KLDLOAD", Const, 0}, + {"SYS_KLDNEXT", Const, 0}, + {"SYS_KLDSTAT", Const, 0}, + {"SYS_KLDSYM", Const, 0}, + {"SYS_KLDUNLOAD", Const, 0}, + {"SYS_KLDUNLOADF", Const, 0}, + {"SYS_KMQ_NOTIFY", Const, 14}, + {"SYS_KMQ_OPEN", Const, 14}, + {"SYS_KMQ_SETATTR", Const, 14}, + {"SYS_KMQ_TIMEDRECEIVE", Const, 14}, + {"SYS_KMQ_TIMEDSEND", Const, 14}, + {"SYS_KMQ_UNLINK", Const, 14}, + {"SYS_KQUEUE", Const, 0}, + {"SYS_KQUEUE1", Const, 1}, + {"SYS_KSEM_CLOSE", Const, 14}, + {"SYS_KSEM_DESTROY", Const, 14}, + {"SYS_KSEM_GETVALUE", Const, 14}, + {"SYS_KSEM_INIT", Const, 14}, + {"SYS_KSEM_OPEN", Const, 14}, + {"SYS_KSEM_POST", Const, 14}, + {"SYS_KSEM_TIMEDWAIT", Const, 14}, + {"SYS_KSEM_TRYWAIT", Const, 14}, + {"SYS_KSEM_UNLINK", Const, 14}, + {"SYS_KSEM_WAIT", Const, 14}, + {"SYS_KTIMER_CREATE", Const, 0}, + {"SYS_KTIMER_DELETE", Const, 0}, + {"SYS_KTIMER_GETOVERRUN", Const, 0}, + {"SYS_KTIMER_GETTIME", Const, 0}, + {"SYS_KTIMER_SETTIME", Const, 0}, + {"SYS_KTRACE", Const, 0}, + {"SYS_LCHFLAGS", Const, 0}, + {"SYS_LCHMOD", Const, 0}, + {"SYS_LCHOWN", Const, 0}, + {"SYS_LCHOWN32", Const, 0}, + {"SYS_LEDGER", Const, 16}, + {"SYS_LGETFH", Const, 0}, + {"SYS_LGETXATTR", Const, 0}, + {"SYS_LINK", Const, 0}, + {"SYS_LINKAT", Const, 0}, + {"SYS_LIO_LISTIO", Const, 0}, + {"SYS_LISTEN", Const, 0}, + {"SYS_LISTXATTR", Const, 0}, + {"SYS_LLISTXATTR", Const, 0}, + {"SYS_LOCK", Const, 0}, + {"SYS_LOOKUP_DCOOKIE", Const, 0}, + {"SYS_LPATHCONF", Const, 0}, + {"SYS_LREMOVEXATTR", Const, 0}, + {"SYS_LSEEK", Const, 0}, + {"SYS_LSETXATTR", Const, 0}, + {"SYS_LSTAT", Const, 0}, + {"SYS_LSTAT64", Const, 0}, + {"SYS_LSTAT64_EXTENDED", Const, 0}, + {"SYS_LSTATV", Const, 0}, + {"SYS_LSTAT_EXTENDED", Const, 0}, + {"SYS_LUTIMES", Const, 0}, + 
{"SYS_MAC_SYSCALL", Const, 0}, + {"SYS_MADVISE", Const, 0}, + {"SYS_MADVISE1", Const, 0}, + {"SYS_MAXSYSCALL", Const, 0}, + {"SYS_MBIND", Const, 0}, + {"SYS_MIGRATE_PAGES", Const, 0}, + {"SYS_MINCORE", Const, 0}, + {"SYS_MINHERIT", Const, 0}, + {"SYS_MKCOMPLEX", Const, 0}, + {"SYS_MKDIR", Const, 0}, + {"SYS_MKDIRAT", Const, 0}, + {"SYS_MKDIR_EXTENDED", Const, 0}, + {"SYS_MKFIFO", Const, 0}, + {"SYS_MKFIFOAT", Const, 0}, + {"SYS_MKFIFO_EXTENDED", Const, 0}, + {"SYS_MKNOD", Const, 0}, + {"SYS_MKNODAT", Const, 0}, + {"SYS_MLOCK", Const, 0}, + {"SYS_MLOCKALL", Const, 0}, + {"SYS_MMAP", Const, 0}, + {"SYS_MMAP2", Const, 0}, + {"SYS_MODCTL", Const, 1}, + {"SYS_MODFIND", Const, 0}, + {"SYS_MODFNEXT", Const, 0}, + {"SYS_MODIFY_LDT", Const, 0}, + {"SYS_MODNEXT", Const, 0}, + {"SYS_MODSTAT", Const, 0}, + {"SYS_MODWATCH", Const, 0}, + {"SYS_MOUNT", Const, 0}, + {"SYS_MOVE_PAGES", Const, 0}, + {"SYS_MPROTECT", Const, 0}, + {"SYS_MPX", Const, 0}, + {"SYS_MQUERY", Const, 1}, + {"SYS_MQ_GETSETATTR", Const, 0}, + {"SYS_MQ_NOTIFY", Const, 0}, + {"SYS_MQ_OPEN", Const, 0}, + {"SYS_MQ_TIMEDRECEIVE", Const, 0}, + {"SYS_MQ_TIMEDSEND", Const, 0}, + {"SYS_MQ_UNLINK", Const, 0}, + {"SYS_MREMAP", Const, 0}, + {"SYS_MSGCTL", Const, 0}, + {"SYS_MSGGET", Const, 0}, + {"SYS_MSGRCV", Const, 0}, + {"SYS_MSGRCV_NOCANCEL", Const, 0}, + {"SYS_MSGSND", Const, 0}, + {"SYS_MSGSND_NOCANCEL", Const, 0}, + {"SYS_MSGSYS", Const, 0}, + {"SYS_MSYNC", Const, 0}, + {"SYS_MSYNC_NOCANCEL", Const, 0}, + {"SYS_MUNLOCK", Const, 0}, + {"SYS_MUNLOCKALL", Const, 0}, + {"SYS_MUNMAP", Const, 0}, + {"SYS_NAME_TO_HANDLE_AT", Const, 0}, + {"SYS_NANOSLEEP", Const, 0}, + {"SYS_NEWFSTATAT", Const, 0}, + {"SYS_NFSCLNT", Const, 0}, + {"SYS_NFSSERVCTL", Const, 0}, + {"SYS_NFSSVC", Const, 0}, + {"SYS_NFSTAT", Const, 0}, + {"SYS_NICE", Const, 0}, + {"SYS_NLM_SYSCALL", Const, 14}, + {"SYS_NLSTAT", Const, 0}, + {"SYS_NMOUNT", Const, 0}, + {"SYS_NSTAT", Const, 0}, + {"SYS_NTP_ADJTIME", Const, 0}, + {"SYS_NTP_GETTIME", Const, 0}, + {"SYS_NUMA_GETAFFINITY", Const, 14}, + {"SYS_NUMA_SETAFFINITY", Const, 14}, + {"SYS_OABI_SYSCALL_BASE", Const, 0}, + {"SYS_OBREAK", Const, 0}, + {"SYS_OLDFSTAT", Const, 0}, + {"SYS_OLDLSTAT", Const, 0}, + {"SYS_OLDOLDUNAME", Const, 0}, + {"SYS_OLDSTAT", Const, 0}, + {"SYS_OLDUNAME", Const, 0}, + {"SYS_OPEN", Const, 0}, + {"SYS_OPENAT", Const, 0}, + {"SYS_OPENBSD_POLL", Const, 0}, + {"SYS_OPEN_BY_HANDLE_AT", Const, 0}, + {"SYS_OPEN_DPROTECTED_NP", Const, 16}, + {"SYS_OPEN_EXTENDED", Const, 0}, + {"SYS_OPEN_NOCANCEL", Const, 0}, + {"SYS_OVADVISE", Const, 0}, + {"SYS_PACCEPT", Const, 1}, + {"SYS_PATHCONF", Const, 0}, + {"SYS_PAUSE", Const, 0}, + {"SYS_PCICONFIG_IOBASE", Const, 0}, + {"SYS_PCICONFIG_READ", Const, 0}, + {"SYS_PCICONFIG_WRITE", Const, 0}, + {"SYS_PDFORK", Const, 0}, + {"SYS_PDGETPID", Const, 0}, + {"SYS_PDKILL", Const, 0}, + {"SYS_PERF_EVENT_OPEN", Const, 0}, + {"SYS_PERSONALITY", Const, 0}, + {"SYS_PID_HIBERNATE", Const, 0}, + {"SYS_PID_RESUME", Const, 0}, + {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0}, + {"SYS_PID_SUSPEND", Const, 0}, + {"SYS_PIPE", Const, 0}, + {"SYS_PIPE2", Const, 0}, + {"SYS_PIVOT_ROOT", Const, 0}, + {"SYS_PMC_CONTROL", Const, 1}, + {"SYS_PMC_GET_INFO", Const, 1}, + {"SYS_POLL", Const, 0}, + {"SYS_POLLTS", Const, 1}, + {"SYS_POLL_NOCANCEL", Const, 0}, + {"SYS_POSIX_FADVISE", Const, 0}, + {"SYS_POSIX_FALLOCATE", Const, 0}, + {"SYS_POSIX_OPENPT", Const, 0}, + {"SYS_POSIX_SPAWN", Const, 0}, + {"SYS_PPOLL", Const, 0}, + {"SYS_PRCTL", Const, 0}, + {"SYS_PREAD", Const, 0}, + {"SYS_PREAD64", Const, 0}, + 
{"SYS_PREADV", Const, 0}, + {"SYS_PREAD_NOCANCEL", Const, 0}, + {"SYS_PRLIMIT64", Const, 0}, + {"SYS_PROCCTL", Const, 3}, + {"SYS_PROCESS_POLICY", Const, 0}, + {"SYS_PROCESS_VM_READV", Const, 0}, + {"SYS_PROCESS_VM_WRITEV", Const, 0}, + {"SYS_PROC_INFO", Const, 0}, + {"SYS_PROF", Const, 0}, + {"SYS_PROFIL", Const, 0}, + {"SYS_PSELECT", Const, 0}, + {"SYS_PSELECT6", Const, 0}, + {"SYS_PSET_ASSIGN", Const, 1}, + {"SYS_PSET_CREATE", Const, 1}, + {"SYS_PSET_DESTROY", Const, 1}, + {"SYS_PSYNCH_CVBROAD", Const, 0}, + {"SYS_PSYNCH_CVCLRPREPOST", Const, 0}, + {"SYS_PSYNCH_CVSIGNAL", Const, 0}, + {"SYS_PSYNCH_CVWAIT", Const, 0}, + {"SYS_PSYNCH_MUTEXDROP", Const, 0}, + {"SYS_PSYNCH_MUTEXWAIT", Const, 0}, + {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0}, + {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_RDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK2", Const, 0}, + {"SYS_PSYNCH_RW_UPGRADE", Const, 0}, + {"SYS_PSYNCH_RW_WRLOCK", Const, 0}, + {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0}, + {"SYS_PTRACE", Const, 0}, + {"SYS_PUTPMSG", Const, 0}, + {"SYS_PWRITE", Const, 0}, + {"SYS_PWRITE64", Const, 0}, + {"SYS_PWRITEV", Const, 0}, + {"SYS_PWRITE_NOCANCEL", Const, 0}, + {"SYS_QUERY_MODULE", Const, 0}, + {"SYS_QUOTACTL", Const, 0}, + {"SYS_RASCTL", Const, 1}, + {"SYS_RCTL_ADD_RULE", Const, 0}, + {"SYS_RCTL_GET_LIMITS", Const, 0}, + {"SYS_RCTL_GET_RACCT", Const, 0}, + {"SYS_RCTL_GET_RULES", Const, 0}, + {"SYS_RCTL_REMOVE_RULE", Const, 0}, + {"SYS_READ", Const, 0}, + {"SYS_READAHEAD", Const, 0}, + {"SYS_READDIR", Const, 0}, + {"SYS_READLINK", Const, 0}, + {"SYS_READLINKAT", Const, 0}, + {"SYS_READV", Const, 0}, + {"SYS_READV_NOCANCEL", Const, 0}, + {"SYS_READ_NOCANCEL", Const, 0}, + {"SYS_REBOOT", Const, 0}, + {"SYS_RECV", Const, 0}, + {"SYS_RECVFROM", Const, 0}, + {"SYS_RECVFROM_NOCANCEL", Const, 0}, + {"SYS_RECVMMSG", Const, 0}, + {"SYS_RECVMSG", Const, 0}, + {"SYS_RECVMSG_NOCANCEL", Const, 0}, + {"SYS_REMAP_FILE_PAGES", Const, 0}, + {"SYS_REMOVEXATTR", Const, 0}, + {"SYS_RENAME", Const, 0}, + {"SYS_RENAMEAT", Const, 0}, + {"SYS_REQUEST_KEY", Const, 0}, + {"SYS_RESTART_SYSCALL", Const, 0}, + {"SYS_REVOKE", Const, 0}, + {"SYS_RFORK", Const, 0}, + {"SYS_RMDIR", Const, 0}, + {"SYS_RTPRIO", Const, 0}, + {"SYS_RTPRIO_THREAD", Const, 0}, + {"SYS_RT_SIGACTION", Const, 0}, + {"SYS_RT_SIGPENDING", Const, 0}, + {"SYS_RT_SIGPROCMASK", Const, 0}, + {"SYS_RT_SIGQUEUEINFO", Const, 0}, + {"SYS_RT_SIGRETURN", Const, 0}, + {"SYS_RT_SIGSUSPEND", Const, 0}, + {"SYS_RT_SIGTIMEDWAIT", Const, 0}, + {"SYS_RT_TGSIGQUEUEINFO", Const, 0}, + {"SYS_SBRK", Const, 0}, + {"SYS_SCHED_GETAFFINITY", Const, 0}, + {"SYS_SCHED_GETPARAM", Const, 0}, + {"SYS_SCHED_GETSCHEDULER", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0}, + {"SYS_SCHED_RR_GET_INTERVAL", Const, 0}, + {"SYS_SCHED_SETAFFINITY", Const, 0}, + {"SYS_SCHED_SETPARAM", Const, 0}, + {"SYS_SCHED_SETSCHEDULER", Const, 0}, + {"SYS_SCHED_YIELD", Const, 0}, + {"SYS_SCTP_GENERIC_RECVMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0}, + {"SYS_SCTP_PEELOFF", Const, 0}, + {"SYS_SEARCHFS", Const, 0}, + {"SYS_SECURITY", Const, 0}, + {"SYS_SELECT", Const, 0}, + {"SYS_SELECT_NOCANCEL", Const, 0}, + {"SYS_SEMCONFIG", Const, 1}, + {"SYS_SEMCTL", Const, 0}, + {"SYS_SEMGET", Const, 0}, + {"SYS_SEMOP", Const, 0}, + {"SYS_SEMSYS", Const, 0}, + {"SYS_SEMTIMEDOP", Const, 0}, + {"SYS_SEM_CLOSE", Const, 0}, + {"SYS_SEM_DESTROY", Const, 0}, + {"SYS_SEM_GETVALUE", Const, 0}, 
+ {"SYS_SEM_INIT", Const, 0}, + {"SYS_SEM_OPEN", Const, 0}, + {"SYS_SEM_POST", Const, 0}, + {"SYS_SEM_TRYWAIT", Const, 0}, + {"SYS_SEM_UNLINK", Const, 0}, + {"SYS_SEM_WAIT", Const, 0}, + {"SYS_SEM_WAIT_NOCANCEL", Const, 0}, + {"SYS_SEND", Const, 0}, + {"SYS_SENDFILE", Const, 0}, + {"SYS_SENDFILE64", Const, 0}, + {"SYS_SENDMMSG", Const, 0}, + {"SYS_SENDMSG", Const, 0}, + {"SYS_SENDMSG_NOCANCEL", Const, 0}, + {"SYS_SENDTO", Const, 0}, + {"SYS_SENDTO_NOCANCEL", Const, 0}, + {"SYS_SETATTRLIST", Const, 0}, + {"SYS_SETAUDIT", Const, 0}, + {"SYS_SETAUDIT_ADDR", Const, 0}, + {"SYS_SETAUID", Const, 0}, + {"SYS_SETCONTEXT", Const, 0}, + {"SYS_SETDOMAINNAME", Const, 0}, + {"SYS_SETEGID", Const, 0}, + {"SYS_SETEUID", Const, 0}, + {"SYS_SETFIB", Const, 0}, + {"SYS_SETFSGID", Const, 0}, + {"SYS_SETFSGID32", Const, 0}, + {"SYS_SETFSUID", Const, 0}, + {"SYS_SETFSUID32", Const, 0}, + {"SYS_SETGID", Const, 0}, + {"SYS_SETGID32", Const, 0}, + {"SYS_SETGROUPS", Const, 0}, + {"SYS_SETGROUPS32", Const, 0}, + {"SYS_SETHOSTNAME", Const, 0}, + {"SYS_SETITIMER", Const, 0}, + {"SYS_SETLCID", Const, 0}, + {"SYS_SETLOGIN", Const, 0}, + {"SYS_SETLOGINCLASS", Const, 0}, + {"SYS_SETNS", Const, 0}, + {"SYS_SETPGID", Const, 0}, + {"SYS_SETPRIORITY", Const, 0}, + {"SYS_SETPRIVEXEC", Const, 0}, + {"SYS_SETREGID", Const, 0}, + {"SYS_SETREGID32", Const, 0}, + {"SYS_SETRESGID", Const, 0}, + {"SYS_SETRESGID32", Const, 0}, + {"SYS_SETRESUID", Const, 0}, + {"SYS_SETRESUID32", Const, 0}, + {"SYS_SETREUID", Const, 0}, + {"SYS_SETREUID32", Const, 0}, + {"SYS_SETRLIMIT", Const, 0}, + {"SYS_SETRTABLE", Const, 1}, + {"SYS_SETSGROUPS", Const, 0}, + {"SYS_SETSID", Const, 0}, + {"SYS_SETSOCKOPT", Const, 0}, + {"SYS_SETTID", Const, 0}, + {"SYS_SETTID_WITH_PID", Const, 0}, + {"SYS_SETTIMEOFDAY", Const, 0}, + {"SYS_SETUID", Const, 0}, + {"SYS_SETUID32", Const, 0}, + {"SYS_SETWGROUPS", Const, 0}, + {"SYS_SETXATTR", Const, 0}, + {"SYS_SET_MEMPOLICY", Const, 0}, + {"SYS_SET_ROBUST_LIST", Const, 0}, + {"SYS_SET_THREAD_AREA", Const, 0}, + {"SYS_SET_TID_ADDRESS", Const, 0}, + {"SYS_SGETMASK", Const, 0}, + {"SYS_SHARED_REGION_CHECK_NP", Const, 0}, + {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0}, + {"SYS_SHMAT", Const, 0}, + {"SYS_SHMCTL", Const, 0}, + {"SYS_SHMDT", Const, 0}, + {"SYS_SHMGET", Const, 0}, + {"SYS_SHMSYS", Const, 0}, + {"SYS_SHM_OPEN", Const, 0}, + {"SYS_SHM_UNLINK", Const, 0}, + {"SYS_SHUTDOWN", Const, 0}, + {"SYS_SIGACTION", Const, 0}, + {"SYS_SIGALTSTACK", Const, 0}, + {"SYS_SIGNAL", Const, 0}, + {"SYS_SIGNALFD", Const, 0}, + {"SYS_SIGNALFD4", Const, 0}, + {"SYS_SIGPENDING", Const, 0}, + {"SYS_SIGPROCMASK", Const, 0}, + {"SYS_SIGQUEUE", Const, 0}, + {"SYS_SIGQUEUEINFO", Const, 1}, + {"SYS_SIGRETURN", Const, 0}, + {"SYS_SIGSUSPEND", Const, 0}, + {"SYS_SIGSUSPEND_NOCANCEL", Const, 0}, + {"SYS_SIGTIMEDWAIT", Const, 0}, + {"SYS_SIGWAIT", Const, 0}, + {"SYS_SIGWAITINFO", Const, 0}, + {"SYS_SOCKET", Const, 0}, + {"SYS_SOCKETCALL", Const, 0}, + {"SYS_SOCKETPAIR", Const, 0}, + {"SYS_SPLICE", Const, 0}, + {"SYS_SSETMASK", Const, 0}, + {"SYS_SSTK", Const, 0}, + {"SYS_STACK_SNAPSHOT", Const, 0}, + {"SYS_STAT", Const, 0}, + {"SYS_STAT64", Const, 0}, + {"SYS_STAT64_EXTENDED", Const, 0}, + {"SYS_STATFS", Const, 0}, + {"SYS_STATFS64", Const, 0}, + {"SYS_STATV", Const, 0}, + {"SYS_STATVFS1", Const, 1}, + {"SYS_STAT_EXTENDED", Const, 0}, + {"SYS_STIME", Const, 0}, + {"SYS_STTY", Const, 0}, + {"SYS_SWAPCONTEXT", Const, 0}, + {"SYS_SWAPCTL", Const, 1}, + {"SYS_SWAPOFF", Const, 0}, + {"SYS_SWAPON", Const, 0}, + {"SYS_SYMLINK", Const, 0}, + 
{"SYS_SYMLINKAT", Const, 0}, + {"SYS_SYNC", Const, 0}, + {"SYS_SYNCFS", Const, 0}, + {"SYS_SYNC_FILE_RANGE", Const, 0}, + {"SYS_SYSARCH", Const, 0}, + {"SYS_SYSCALL", Const, 0}, + {"SYS_SYSCALL_BASE", Const, 0}, + {"SYS_SYSFS", Const, 0}, + {"SYS_SYSINFO", Const, 0}, + {"SYS_SYSLOG", Const, 0}, + {"SYS_TEE", Const, 0}, + {"SYS_TGKILL", Const, 0}, + {"SYS_THREAD_SELFID", Const, 0}, + {"SYS_THR_CREATE", Const, 0}, + {"SYS_THR_EXIT", Const, 0}, + {"SYS_THR_KILL", Const, 0}, + {"SYS_THR_KILL2", Const, 0}, + {"SYS_THR_NEW", Const, 0}, + {"SYS_THR_SELF", Const, 0}, + {"SYS_THR_SET_NAME", Const, 0}, + {"SYS_THR_SUSPEND", Const, 0}, + {"SYS_THR_WAKE", Const, 0}, + {"SYS_TIME", Const, 0}, + {"SYS_TIMERFD_CREATE", Const, 0}, + {"SYS_TIMERFD_GETTIME", Const, 0}, + {"SYS_TIMERFD_SETTIME", Const, 0}, + {"SYS_TIMER_CREATE", Const, 0}, + {"SYS_TIMER_DELETE", Const, 0}, + {"SYS_TIMER_GETOVERRUN", Const, 0}, + {"SYS_TIMER_GETTIME", Const, 0}, + {"SYS_TIMER_SETTIME", Const, 0}, + {"SYS_TIMES", Const, 0}, + {"SYS_TKILL", Const, 0}, + {"SYS_TRUNCATE", Const, 0}, + {"SYS_TRUNCATE64", Const, 0}, + {"SYS_TUXCALL", Const, 0}, + {"SYS_UGETRLIMIT", Const, 0}, + {"SYS_ULIMIT", Const, 0}, + {"SYS_UMASK", Const, 0}, + {"SYS_UMASK_EXTENDED", Const, 0}, + {"SYS_UMOUNT", Const, 0}, + {"SYS_UMOUNT2", Const, 0}, + {"SYS_UNAME", Const, 0}, + {"SYS_UNDELETE", Const, 0}, + {"SYS_UNLINK", Const, 0}, + {"SYS_UNLINKAT", Const, 0}, + {"SYS_UNMOUNT", Const, 0}, + {"SYS_UNSHARE", Const, 0}, + {"SYS_USELIB", Const, 0}, + {"SYS_USTAT", Const, 0}, + {"SYS_UTIME", Const, 0}, + {"SYS_UTIMENSAT", Const, 0}, + {"SYS_UTIMES", Const, 0}, + {"SYS_UTRACE", Const, 0}, + {"SYS_UUIDGEN", Const, 0}, + {"SYS_VADVISE", Const, 1}, + {"SYS_VFORK", Const, 0}, + {"SYS_VHANGUP", Const, 0}, + {"SYS_VM86", Const, 0}, + {"SYS_VM86OLD", Const, 0}, + {"SYS_VMSPLICE", Const, 0}, + {"SYS_VM_PRESSURE_MONITOR", Const, 0}, + {"SYS_VSERVER", Const, 0}, + {"SYS_WAIT4", Const, 0}, + {"SYS_WAIT4_NOCANCEL", Const, 0}, + {"SYS_WAIT6", Const, 1}, + {"SYS_WAITEVENT", Const, 0}, + {"SYS_WAITID", Const, 0}, + {"SYS_WAITID_NOCANCEL", Const, 0}, + {"SYS_WAITPID", Const, 0}, + {"SYS_WATCHEVENT", Const, 0}, + {"SYS_WORKQ_KERNRETURN", Const, 0}, + {"SYS_WORKQ_OPEN", Const, 0}, + {"SYS_WRITE", Const, 0}, + {"SYS_WRITEV", Const, 0}, + {"SYS_WRITEV_NOCANCEL", Const, 0}, + {"SYS_WRITE_NOCANCEL", Const, 0}, + {"SYS_YIELD", Const, 0}, + {"SYS__LLSEEK", Const, 0}, + {"SYS__LWP_CONTINUE", Const, 1}, + {"SYS__LWP_CREATE", Const, 1}, + {"SYS__LWP_CTL", Const, 1}, + {"SYS__LWP_DETACH", Const, 1}, + {"SYS__LWP_EXIT", Const, 1}, + {"SYS__LWP_GETNAME", Const, 1}, + {"SYS__LWP_GETPRIVATE", Const, 1}, + {"SYS__LWP_KILL", Const, 1}, + {"SYS__LWP_PARK", Const, 1}, + {"SYS__LWP_SELF", Const, 1}, + {"SYS__LWP_SETNAME", Const, 1}, + {"SYS__LWP_SETPRIVATE", Const, 1}, + {"SYS__LWP_SUSPEND", Const, 1}, + {"SYS__LWP_UNPARK", Const, 1}, + {"SYS__LWP_UNPARK_ALL", Const, 1}, + {"SYS__LWP_WAIT", Const, 1}, + {"SYS__LWP_WAKEUP", Const, 1}, + {"SYS__NEWSELECT", Const, 0}, + {"SYS__PSET_BIND", Const, 1}, + {"SYS__SCHED_GETAFFINITY", Const, 1}, + {"SYS__SCHED_GETPARAM", Const, 1}, + {"SYS__SCHED_SETAFFINITY", Const, 1}, + {"SYS__SCHED_SETPARAM", Const, 1}, + {"SYS__SYSCTL", Const, 0}, + {"SYS__UMTX_LOCK", Const, 0}, + {"SYS__UMTX_OP", Const, 0}, + {"SYS__UMTX_UNLOCK", Const, 0}, + {"SYS___ACL_ACLCHECK_FD", Const, 0}, + {"SYS___ACL_ACLCHECK_FILE", Const, 0}, + {"SYS___ACL_ACLCHECK_LINK", Const, 0}, + {"SYS___ACL_DELETE_FD", Const, 0}, + {"SYS___ACL_DELETE_FILE", Const, 0}, + {"SYS___ACL_DELETE_LINK", Const, 
0}, + {"SYS___ACL_GET_FD", Const, 0}, + {"SYS___ACL_GET_FILE", Const, 0}, + {"SYS___ACL_GET_LINK", Const, 0}, + {"SYS___ACL_SET_FD", Const, 0}, + {"SYS___ACL_SET_FILE", Const, 0}, + {"SYS___ACL_SET_LINK", Const, 0}, + {"SYS___CAP_RIGHTS_GET", Const, 14}, + {"SYS___CLONE", Const, 1}, + {"SYS___DISABLE_THREADSIGNAL", Const, 0}, + {"SYS___GETCWD", Const, 0}, + {"SYS___GETLOGIN", Const, 1}, + {"SYS___GET_TCB", Const, 1}, + {"SYS___MAC_EXECVE", Const, 0}, + {"SYS___MAC_GETFSSTAT", Const, 0}, + {"SYS___MAC_GET_FD", Const, 0}, + {"SYS___MAC_GET_FILE", Const, 0}, + {"SYS___MAC_GET_LCID", Const, 0}, + {"SYS___MAC_GET_LCTX", Const, 0}, + {"SYS___MAC_GET_LINK", Const, 0}, + {"SYS___MAC_GET_MOUNT", Const, 0}, + {"SYS___MAC_GET_PID", Const, 0}, + {"SYS___MAC_GET_PROC", Const, 0}, + {"SYS___MAC_MOUNT", Const, 0}, + {"SYS___MAC_SET_FD", Const, 0}, + {"SYS___MAC_SET_FILE", Const, 0}, + {"SYS___MAC_SET_LCTX", Const, 0}, + {"SYS___MAC_SET_LINK", Const, 0}, + {"SYS___MAC_SET_PROC", Const, 0}, + {"SYS___MAC_SYSCALL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___POSIX_CHOWN", Const, 1}, + {"SYS___POSIX_FCHOWN", Const, 1}, + {"SYS___POSIX_LCHOWN", Const, 1}, + {"SYS___POSIX_RENAME", Const, 1}, + {"SYS___PTHREAD_CANCELED", Const, 0}, + {"SYS___PTHREAD_CHDIR", Const, 0}, + {"SYS___PTHREAD_FCHDIR", Const, 0}, + {"SYS___PTHREAD_KILL", Const, 0}, + {"SYS___PTHREAD_MARKCANCEL", Const, 0}, + {"SYS___PTHREAD_SIGMASK", Const, 0}, + {"SYS___QUOTACTL", Const, 1}, + {"SYS___SEMCTL", Const, 1}, + {"SYS___SEMWAIT_SIGNAL", Const, 0}, + {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___SETLOGIN", Const, 1}, + {"SYS___SETUGID", Const, 0}, + {"SYS___SET_TCB", Const, 1}, + {"SYS___SIGACTION_SIGTRAMP", Const, 1}, + {"SYS___SIGTIMEDWAIT", Const, 1}, + {"SYS___SIGWAIT", Const, 0}, + {"SYS___SIGWAIT_NOCANCEL", Const, 0}, + {"SYS___SYSCTL", Const, 0}, + {"SYS___TFORK", Const, 1}, + {"SYS___THREXIT", Const, 1}, + {"SYS___THRSIGDIVERT", Const, 1}, + {"SYS___THRSLEEP", Const, 1}, + {"SYS___THRWAKEUP", Const, 1}, + {"S_ARCH1", Const, 1}, + {"S_ARCH2", Const, 1}, + {"S_BLKSIZE", Const, 0}, + {"S_IEXEC", Const, 0}, + {"S_IFBLK", Const, 0}, + {"S_IFCHR", Const, 0}, + {"S_IFDIR", Const, 0}, + {"S_IFIFO", Const, 0}, + {"S_IFLNK", Const, 0}, + {"S_IFMT", Const, 0}, + {"S_IFREG", Const, 0}, + {"S_IFSOCK", Const, 0}, + {"S_IFWHT", Const, 0}, + {"S_IREAD", Const, 0}, + {"S_IRGRP", Const, 0}, + {"S_IROTH", Const, 0}, + {"S_IRUSR", Const, 0}, + {"S_IRWXG", Const, 0}, + {"S_IRWXO", Const, 0}, + {"S_IRWXU", Const, 0}, + {"S_ISGID", Const, 0}, + {"S_ISTXT", Const, 0}, + {"S_ISUID", Const, 0}, + {"S_ISVTX", Const, 0}, + {"S_IWGRP", Const, 0}, + {"S_IWOTH", Const, 0}, + {"S_IWRITE", Const, 0}, + {"S_IWUSR", Const, 0}, + {"S_IXGRP", Const, 0}, + {"S_IXOTH", Const, 0}, + {"S_IXUSR", Const, 0}, + {"S_LOGIN_SET", Const, 1}, + {"SecurityAttributes", Type, 0}, + {"SecurityAttributes.InheritHandle", Field, 0}, + {"SecurityAttributes.Length", Field, 0}, + {"SecurityAttributes.SecurityDescriptor", Field, 0}, + {"Seek", Func, 0}, + {"Select", Func, 0}, + {"Sendfile", Func, 0}, + {"Sendmsg", Func, 0}, + {"SendmsgN", Func, 3}, + {"Sendto", Func, 0}, + {"Servent", Type, 0}, + {"Servent.Aliases", Field, 0}, + {"Servent.Name", Field, 0}, + {"Servent.Port", Field, 0}, + {"Servent.Proto", Field, 0}, + {"SetBpf", Func, 0}, + {"SetBpfBuflen", Func, 0}, + {"SetBpfDatalink", Func, 0}, + {"SetBpfHeadercmpl", Func, 0}, + {"SetBpfImmediate", Func, 0}, + {"SetBpfInterface", Func, 0}, + {"SetBpfPromisc", 
Func, 0}, + {"SetBpfTimeout", Func, 0}, + {"SetCurrentDirectory", Func, 0}, + {"SetEndOfFile", Func, 0}, + {"SetEnvironmentVariable", Func, 0}, + {"SetFileAttributes", Func, 0}, + {"SetFileCompletionNotificationModes", Func, 2}, + {"SetFilePointer", Func, 0}, + {"SetFileTime", Func, 0}, + {"SetHandleInformation", Func, 0}, + {"SetKevent", Func, 0}, + {"SetLsfPromisc", Func, 0}, + {"SetNonblock", Func, 0}, + {"Setdomainname", Func, 0}, + {"Setegid", Func, 0}, + {"Setenv", Func, 0}, + {"Seteuid", Func, 0}, + {"Setfsgid", Func, 0}, + {"Setfsuid", Func, 0}, + {"Setgid", Func, 0}, + {"Setgroups", Func, 0}, + {"Sethostname", Func, 0}, + {"Setlogin", Func, 0}, + {"Setpgid", Func, 0}, + {"Setpriority", Func, 0}, + {"Setprivexec", Func, 0}, + {"Setregid", Func, 0}, + {"Setresgid", Func, 0}, + {"Setresuid", Func, 0}, + {"Setreuid", Func, 0}, + {"Setrlimit", Func, 0}, + {"Setsid", Func, 0}, + {"Setsockopt", Func, 0}, + {"SetsockoptByte", Func, 0}, + {"SetsockoptICMPv6Filter", Func, 2}, + {"SetsockoptIPMreq", Func, 0}, + {"SetsockoptIPMreqn", Func, 0}, + {"SetsockoptIPv6Mreq", Func, 0}, + {"SetsockoptInet4Addr", Func, 0}, + {"SetsockoptInt", Func, 0}, + {"SetsockoptLinger", Func, 0}, + {"SetsockoptString", Func, 0}, + {"SetsockoptTimeval", Func, 0}, + {"Settimeofday", Func, 0}, + {"Setuid", Func, 0}, + {"Setxattr", Func, 1}, + {"Shutdown", Func, 0}, + {"SidTypeAlias", Const, 0}, + {"SidTypeComputer", Const, 0}, + {"SidTypeDeletedAccount", Const, 0}, + {"SidTypeDomain", Const, 0}, + {"SidTypeGroup", Const, 0}, + {"SidTypeInvalid", Const, 0}, + {"SidTypeLabel", Const, 0}, + {"SidTypeUnknown", Const, 0}, + {"SidTypeUser", Const, 0}, + {"SidTypeWellKnownGroup", Const, 0}, + {"Signal", Type, 0}, + {"SizeofBpfHdr", Const, 0}, + {"SizeofBpfInsn", Const, 0}, + {"SizeofBpfProgram", Const, 0}, + {"SizeofBpfStat", Const, 0}, + {"SizeofBpfVersion", Const, 0}, + {"SizeofBpfZbuf", Const, 0}, + {"SizeofBpfZbufHeader", Const, 0}, + {"SizeofCmsghdr", Const, 0}, + {"SizeofICMPv6Filter", Const, 2}, + {"SizeofIPMreq", Const, 0}, + {"SizeofIPMreqn", Const, 0}, + {"SizeofIPv6MTUInfo", Const, 2}, + {"SizeofIPv6Mreq", Const, 0}, + {"SizeofIfAddrmsg", Const, 0}, + {"SizeofIfAnnounceMsghdr", Const, 1}, + {"SizeofIfData", Const, 0}, + {"SizeofIfInfomsg", Const, 0}, + {"SizeofIfMsghdr", Const, 0}, + {"SizeofIfaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr2", Const, 0}, + {"SizeofInet4Pktinfo", Const, 0}, + {"SizeofInet6Pktinfo", Const, 0}, + {"SizeofInotifyEvent", Const, 0}, + {"SizeofLinger", Const, 0}, + {"SizeofMsghdr", Const, 0}, + {"SizeofNlAttr", Const, 0}, + {"SizeofNlMsgerr", Const, 0}, + {"SizeofNlMsghdr", Const, 0}, + {"SizeofRtAttr", Const, 0}, + {"SizeofRtGenmsg", Const, 0}, + {"SizeofRtMetrics", Const, 0}, + {"SizeofRtMsg", Const, 0}, + {"SizeofRtMsghdr", Const, 0}, + {"SizeofRtNexthop", Const, 0}, + {"SizeofSockFilter", Const, 0}, + {"SizeofSockFprog", Const, 0}, + {"SizeofSockaddrAny", Const, 0}, + {"SizeofSockaddrDatalink", Const, 0}, + {"SizeofSockaddrInet4", Const, 0}, + {"SizeofSockaddrInet6", Const, 0}, + {"SizeofSockaddrLinklayer", Const, 0}, + {"SizeofSockaddrNetlink", Const, 0}, + {"SizeofSockaddrUnix", Const, 0}, + {"SizeofTCPInfo", Const, 1}, + {"SizeofUcred", Const, 0}, + {"SlicePtrFromStrings", Func, 1}, + {"SockFilter", Type, 0}, + {"SockFilter.Code", Field, 0}, + {"SockFilter.Jf", Field, 0}, + {"SockFilter.Jt", Field, 0}, + {"SockFilter.K", Field, 0}, + {"SockFprog", Type, 0}, + {"SockFprog.Filter", Field, 0}, + {"SockFprog.Len", Field, 0}, + {"SockFprog.Pad_cgo_0", 
Field, 0}, + {"Sockaddr", Type, 0}, + {"SockaddrDatalink", Type, 0}, + {"SockaddrDatalink.Alen", Field, 0}, + {"SockaddrDatalink.Data", Field, 0}, + {"SockaddrDatalink.Family", Field, 0}, + {"SockaddrDatalink.Index", Field, 0}, + {"SockaddrDatalink.Len", Field, 0}, + {"SockaddrDatalink.Nlen", Field, 0}, + {"SockaddrDatalink.Slen", Field, 0}, + {"SockaddrDatalink.Type", Field, 0}, + {"SockaddrGen", Type, 0}, + {"SockaddrInet4", Type, 0}, + {"SockaddrInet4.Addr", Field, 0}, + {"SockaddrInet4.Port", Field, 0}, + {"SockaddrInet6", Type, 0}, + {"SockaddrInet6.Addr", Field, 0}, + {"SockaddrInet6.Port", Field, 0}, + {"SockaddrInet6.ZoneId", Field, 0}, + {"SockaddrLinklayer", Type, 0}, + {"SockaddrLinklayer.Addr", Field, 0}, + {"SockaddrLinklayer.Halen", Field, 0}, + {"SockaddrLinklayer.Hatype", Field, 0}, + {"SockaddrLinklayer.Ifindex", Field, 0}, + {"SockaddrLinklayer.Pkttype", Field, 0}, + {"SockaddrLinklayer.Protocol", Field, 0}, + {"SockaddrNetlink", Type, 0}, + {"SockaddrNetlink.Family", Field, 0}, + {"SockaddrNetlink.Groups", Field, 0}, + {"SockaddrNetlink.Pad", Field, 0}, + {"SockaddrNetlink.Pid", Field, 0}, + {"SockaddrUnix", Type, 0}, + {"SockaddrUnix.Name", Field, 0}, + {"Socket", Func, 0}, + {"SocketControlMessage", Type, 0}, + {"SocketControlMessage.Data", Field, 0}, + {"SocketControlMessage.Header", Field, 0}, + {"SocketDisableIPv6", Var, 0}, + {"Socketpair", Func, 0}, + {"Splice", Func, 0}, + {"StartProcess", Func, 0}, + {"StartupInfo", Type, 0}, + {"StartupInfo.Cb", Field, 0}, + {"StartupInfo.Desktop", Field, 0}, + {"StartupInfo.FillAttribute", Field, 0}, + {"StartupInfo.Flags", Field, 0}, + {"StartupInfo.ShowWindow", Field, 0}, + {"StartupInfo.StdErr", Field, 0}, + {"StartupInfo.StdInput", Field, 0}, + {"StartupInfo.StdOutput", Field, 0}, + {"StartupInfo.Title", Field, 0}, + {"StartupInfo.X", Field, 0}, + {"StartupInfo.XCountChars", Field, 0}, + {"StartupInfo.XSize", Field, 0}, + {"StartupInfo.Y", Field, 0}, + {"StartupInfo.YCountChars", Field, 0}, + {"StartupInfo.YSize", Field, 0}, + {"Stat", Func, 0}, + {"Stat_t", Type, 0}, + {"Stat_t.Atim", Field, 0}, + {"Stat_t.Atim_ext", Field, 12}, + {"Stat_t.Atimespec", Field, 0}, + {"Stat_t.Birthtimespec", Field, 0}, + {"Stat_t.Blksize", Field, 0}, + {"Stat_t.Blocks", Field, 0}, + {"Stat_t.Btim_ext", Field, 12}, + {"Stat_t.Ctim", Field, 0}, + {"Stat_t.Ctim_ext", Field, 12}, + {"Stat_t.Ctimespec", Field, 0}, + {"Stat_t.Dev", Field, 0}, + {"Stat_t.Flags", Field, 0}, + {"Stat_t.Gen", Field, 0}, + {"Stat_t.Gid", Field, 0}, + {"Stat_t.Ino", Field, 0}, + {"Stat_t.Lspare", Field, 0}, + {"Stat_t.Lspare0", Field, 2}, + {"Stat_t.Lspare1", Field, 2}, + {"Stat_t.Mode", Field, 0}, + {"Stat_t.Mtim", Field, 0}, + {"Stat_t.Mtim_ext", Field, 12}, + {"Stat_t.Mtimespec", Field, 0}, + {"Stat_t.Nlink", Field, 0}, + {"Stat_t.Pad_cgo_0", Field, 0}, + {"Stat_t.Pad_cgo_1", Field, 0}, + {"Stat_t.Pad_cgo_2", Field, 0}, + {"Stat_t.Padding0", Field, 12}, + {"Stat_t.Padding1", Field, 12}, + {"Stat_t.Qspare", Field, 0}, + {"Stat_t.Rdev", Field, 0}, + {"Stat_t.Size", Field, 0}, + {"Stat_t.Spare", Field, 2}, + {"Stat_t.Uid", Field, 0}, + {"Stat_t.X__pad0", Field, 0}, + {"Stat_t.X__pad1", Field, 0}, + {"Stat_t.X__pad2", Field, 0}, + {"Stat_t.X__st_birthtim", Field, 2}, + {"Stat_t.X__st_ino", Field, 0}, + {"Stat_t.X__unused", Field, 0}, + {"Statfs", Func, 0}, + {"Statfs_t", Type, 0}, + {"Statfs_t.Asyncreads", Field, 0}, + {"Statfs_t.Asyncwrites", Field, 0}, + {"Statfs_t.Bavail", Field, 0}, + {"Statfs_t.Bfree", Field, 0}, + {"Statfs_t.Blocks", Field, 0}, + {"Statfs_t.Bsize", 
Field, 0}, + {"Statfs_t.Charspare", Field, 0}, + {"Statfs_t.F_asyncreads", Field, 2}, + {"Statfs_t.F_asyncwrites", Field, 2}, + {"Statfs_t.F_bavail", Field, 2}, + {"Statfs_t.F_bfree", Field, 2}, + {"Statfs_t.F_blocks", Field, 2}, + {"Statfs_t.F_bsize", Field, 2}, + {"Statfs_t.F_ctime", Field, 2}, + {"Statfs_t.F_favail", Field, 2}, + {"Statfs_t.F_ffree", Field, 2}, + {"Statfs_t.F_files", Field, 2}, + {"Statfs_t.F_flags", Field, 2}, + {"Statfs_t.F_fsid", Field, 2}, + {"Statfs_t.F_fstypename", Field, 2}, + {"Statfs_t.F_iosize", Field, 2}, + {"Statfs_t.F_mntfromname", Field, 2}, + {"Statfs_t.F_mntfromspec", Field, 3}, + {"Statfs_t.F_mntonname", Field, 2}, + {"Statfs_t.F_namemax", Field, 2}, + {"Statfs_t.F_owner", Field, 2}, + {"Statfs_t.F_spare", Field, 2}, + {"Statfs_t.F_syncreads", Field, 2}, + {"Statfs_t.F_syncwrites", Field, 2}, + {"Statfs_t.Ffree", Field, 0}, + {"Statfs_t.Files", Field, 0}, + {"Statfs_t.Flags", Field, 0}, + {"Statfs_t.Frsize", Field, 0}, + {"Statfs_t.Fsid", Field, 0}, + {"Statfs_t.Fssubtype", Field, 0}, + {"Statfs_t.Fstypename", Field, 0}, + {"Statfs_t.Iosize", Field, 0}, + {"Statfs_t.Mntfromname", Field, 0}, + {"Statfs_t.Mntonname", Field, 0}, + {"Statfs_t.Mount_info", Field, 2}, + {"Statfs_t.Namelen", Field, 0}, + {"Statfs_t.Namemax", Field, 0}, + {"Statfs_t.Owner", Field, 0}, + {"Statfs_t.Pad_cgo_0", Field, 0}, + {"Statfs_t.Pad_cgo_1", Field, 2}, + {"Statfs_t.Reserved", Field, 0}, + {"Statfs_t.Spare", Field, 0}, + {"Statfs_t.Syncreads", Field, 0}, + {"Statfs_t.Syncwrites", Field, 0}, + {"Statfs_t.Type", Field, 0}, + {"Statfs_t.Version", Field, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"StringBytePtr", Func, 0}, + {"StringByteSlice", Func, 0}, + {"StringSlicePtr", Func, 0}, + {"StringToSid", Func, 0}, + {"StringToUTF16", Func, 0}, + {"StringToUTF16Ptr", Func, 0}, + {"Symlink", Func, 0}, + {"Sync", Func, 0}, + {"SyncFileRange", Func, 0}, + {"SysProcAttr", Type, 0}, + {"SysProcAttr.AdditionalInheritedHandles", Field, 17}, + {"SysProcAttr.AmbientCaps", Field, 9}, + {"SysProcAttr.CgroupFD", Field, 20}, + {"SysProcAttr.Chroot", Field, 0}, + {"SysProcAttr.Cloneflags", Field, 2}, + {"SysProcAttr.CmdLine", Field, 0}, + {"SysProcAttr.CreationFlags", Field, 1}, + {"SysProcAttr.Credential", Field, 0}, + {"SysProcAttr.Ctty", Field, 1}, + {"SysProcAttr.Foreground", Field, 5}, + {"SysProcAttr.GidMappings", Field, 4}, + {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5}, + {"SysProcAttr.HideWindow", Field, 0}, + {"SysProcAttr.Jail", Field, 21}, + {"SysProcAttr.NoInheritHandles", Field, 16}, + {"SysProcAttr.Noctty", Field, 0}, + {"SysProcAttr.ParentProcess", Field, 17}, + {"SysProcAttr.Pdeathsig", Field, 0}, + {"SysProcAttr.Pgid", Field, 5}, + {"SysProcAttr.PidFD", Field, 22}, + {"SysProcAttr.ProcessAttributes", Field, 13}, + {"SysProcAttr.Ptrace", Field, 0}, + {"SysProcAttr.Setctty", Field, 0}, + {"SysProcAttr.Setpgid", Field, 0}, + {"SysProcAttr.Setsid", Field, 0}, + {"SysProcAttr.ThreadAttributes", Field, 13}, + {"SysProcAttr.Token", Field, 10}, + {"SysProcAttr.UidMappings", Field, 4}, + {"SysProcAttr.Unshareflags", Field, 7}, + {"SysProcAttr.UseCgroupFD", Field, 20}, + {"SysProcIDMap", Type, 4}, + {"SysProcIDMap.ContainerID", Field, 4}, + {"SysProcIDMap.HostID", Field, 4}, + {"SysProcIDMap.Size", Field, 4}, + {"Syscall", Func, 0}, + {"Syscall12", Func, 0}, + {"Syscall15", Func, 0}, + {"Syscall18", Func, 12}, + {"Syscall6", Func, 0}, + {"Syscall9", Func, 0}, + {"SyscallN", Func, 18}, + {"Sysctl", Func, 0}, + {"SysctlUint32", Func, 0}, + {"Sysctlnode", 
Type, 2}, + {"Sysctlnode.Flags", Field, 2}, + {"Sysctlnode.Name", Field, 2}, + {"Sysctlnode.Num", Field, 2}, + {"Sysctlnode.Un", Field, 2}, + {"Sysctlnode.Ver", Field, 2}, + {"Sysctlnode.X__rsvd", Field, 2}, + {"Sysctlnode.X_sysctl_desc", Field, 2}, + {"Sysctlnode.X_sysctl_func", Field, 2}, + {"Sysctlnode.X_sysctl_parent", Field, 2}, + {"Sysctlnode.X_sysctl_size", Field, 2}, + {"Sysinfo", Func, 0}, + {"Sysinfo_t", Type, 0}, + {"Sysinfo_t.Bufferram", Field, 0}, + {"Sysinfo_t.Freehigh", Field, 0}, + {"Sysinfo_t.Freeram", Field, 0}, + {"Sysinfo_t.Freeswap", Field, 0}, + {"Sysinfo_t.Loads", Field, 0}, + {"Sysinfo_t.Pad", Field, 0}, + {"Sysinfo_t.Pad_cgo_0", Field, 0}, + {"Sysinfo_t.Pad_cgo_1", Field, 0}, + {"Sysinfo_t.Procs", Field, 0}, + {"Sysinfo_t.Sharedram", Field, 0}, + {"Sysinfo_t.Totalhigh", Field, 0}, + {"Sysinfo_t.Totalram", Field, 0}, + {"Sysinfo_t.Totalswap", Field, 0}, + {"Sysinfo_t.Unit", Field, 0}, + {"Sysinfo_t.Uptime", Field, 0}, + {"Sysinfo_t.X_f", Field, 0}, + {"Systemtime", Type, 0}, + {"Systemtime.Day", Field, 0}, + {"Systemtime.DayOfWeek", Field, 0}, + {"Systemtime.Hour", Field, 0}, + {"Systemtime.Milliseconds", Field, 0}, + {"Systemtime.Minute", Field, 0}, + {"Systemtime.Month", Field, 0}, + {"Systemtime.Second", Field, 0}, + {"Systemtime.Year", Field, 0}, + {"TCGETS", Const, 0}, + {"TCIFLUSH", Const, 1}, + {"TCIOFLUSH", Const, 1}, + {"TCOFLUSH", Const, 1}, + {"TCPInfo", Type, 1}, + {"TCPInfo.Advmss", Field, 1}, + {"TCPInfo.Ato", Field, 1}, + {"TCPInfo.Backoff", Field, 1}, + {"TCPInfo.Ca_state", Field, 1}, + {"TCPInfo.Fackets", Field, 1}, + {"TCPInfo.Last_ack_recv", Field, 1}, + {"TCPInfo.Last_ack_sent", Field, 1}, + {"TCPInfo.Last_data_recv", Field, 1}, + {"TCPInfo.Last_data_sent", Field, 1}, + {"TCPInfo.Lost", Field, 1}, + {"TCPInfo.Options", Field, 1}, + {"TCPInfo.Pad_cgo_0", Field, 1}, + {"TCPInfo.Pmtu", Field, 1}, + {"TCPInfo.Probes", Field, 1}, + {"TCPInfo.Rcv_mss", Field, 1}, + {"TCPInfo.Rcv_rtt", Field, 1}, + {"TCPInfo.Rcv_space", Field, 1}, + {"TCPInfo.Rcv_ssthresh", Field, 1}, + {"TCPInfo.Reordering", Field, 1}, + {"TCPInfo.Retrans", Field, 1}, + {"TCPInfo.Retransmits", Field, 1}, + {"TCPInfo.Rto", Field, 1}, + {"TCPInfo.Rtt", Field, 1}, + {"TCPInfo.Rttvar", Field, 1}, + {"TCPInfo.Sacked", Field, 1}, + {"TCPInfo.Snd_cwnd", Field, 1}, + {"TCPInfo.Snd_mss", Field, 1}, + {"TCPInfo.Snd_ssthresh", Field, 1}, + {"TCPInfo.State", Field, 1}, + {"TCPInfo.Total_retrans", Field, 1}, + {"TCPInfo.Unacked", Field, 1}, + {"TCPKeepalive", Type, 3}, + {"TCPKeepalive.Interval", Field, 3}, + {"TCPKeepalive.OnOff", Field, 3}, + {"TCPKeepalive.Time", Field, 3}, + {"TCP_CA_NAME_MAX", Const, 0}, + {"TCP_CONGCTL", Const, 1}, + {"TCP_CONGESTION", Const, 0}, + {"TCP_CONNECTIONTIMEOUT", Const, 0}, + {"TCP_CORK", Const, 0}, + {"TCP_DEFER_ACCEPT", Const, 0}, + {"TCP_ENABLE_ECN", Const, 16}, + {"TCP_INFO", Const, 0}, + {"TCP_KEEPALIVE", Const, 0}, + {"TCP_KEEPCNT", Const, 0}, + {"TCP_KEEPIDLE", Const, 0}, + {"TCP_KEEPINIT", Const, 1}, + {"TCP_KEEPINTVL", Const, 0}, + {"TCP_LINGER2", Const, 0}, + {"TCP_MAXBURST", Const, 0}, + {"TCP_MAXHLEN", Const, 0}, + {"TCP_MAXOLEN", Const, 0}, + {"TCP_MAXSEG", Const, 0}, + {"TCP_MAXWIN", Const, 0}, + {"TCP_MAX_SACK", Const, 0}, + {"TCP_MAX_WINSHIFT", Const, 0}, + {"TCP_MD5SIG", Const, 0}, + {"TCP_MD5SIG_MAXKEYLEN", Const, 0}, + {"TCP_MINMSS", Const, 0}, + {"TCP_MINMSSOVERLOAD", Const, 0}, + {"TCP_MSS", Const, 0}, + {"TCP_NODELAY", Const, 0}, + {"TCP_NOOPT", Const, 0}, + {"TCP_NOPUSH", Const, 0}, + {"TCP_NOTSENT_LOWAT", Const, 16}, + {"TCP_NSTATES", Const, 
1}, + {"TCP_QUICKACK", Const, 0}, + {"TCP_RXT_CONNDROPTIME", Const, 0}, + {"TCP_RXT_FINDROP", Const, 0}, + {"TCP_SACK_ENABLE", Const, 1}, + {"TCP_SENDMOREACKS", Const, 16}, + {"TCP_SYNCNT", Const, 0}, + {"TCP_VENDOR", Const, 3}, + {"TCP_WINDOW_CLAMP", Const, 0}, + {"TCSAFLUSH", Const, 1}, + {"TCSETS", Const, 0}, + {"TF_DISCONNECT", Const, 0}, + {"TF_REUSE_SOCKET", Const, 0}, + {"TF_USE_DEFAULT_WORKER", Const, 0}, + {"TF_USE_KERNEL_APC", Const, 0}, + {"TF_USE_SYSTEM_THREAD", Const, 0}, + {"TF_WRITE_BEHIND", Const, 0}, + {"TH32CS_INHERIT", Const, 4}, + {"TH32CS_SNAPALL", Const, 4}, + {"TH32CS_SNAPHEAPLIST", Const, 4}, + {"TH32CS_SNAPMODULE", Const, 4}, + {"TH32CS_SNAPMODULE32", Const, 4}, + {"TH32CS_SNAPPROCESS", Const, 4}, + {"TH32CS_SNAPTHREAD", Const, 4}, + {"TIME_ZONE_ID_DAYLIGHT", Const, 0}, + {"TIME_ZONE_ID_STANDARD", Const, 0}, + {"TIME_ZONE_ID_UNKNOWN", Const, 0}, + {"TIOCCBRK", Const, 0}, + {"TIOCCDTR", Const, 0}, + {"TIOCCONS", Const, 0}, + {"TIOCDCDTIMESTAMP", Const, 0}, + {"TIOCDRAIN", Const, 0}, + {"TIOCDSIMICROCODE", Const, 0}, + {"TIOCEXCL", Const, 0}, + {"TIOCEXT", Const, 0}, + {"TIOCFLAG_CDTRCTS", Const, 1}, + {"TIOCFLAG_CLOCAL", Const, 1}, + {"TIOCFLAG_CRTSCTS", Const, 1}, + {"TIOCFLAG_MDMBUF", Const, 1}, + {"TIOCFLAG_PPS", Const, 1}, + {"TIOCFLAG_SOFTCAR", Const, 1}, + {"TIOCFLUSH", Const, 0}, + {"TIOCGDEV", Const, 0}, + {"TIOCGDRAINWAIT", Const, 0}, + {"TIOCGETA", Const, 0}, + {"TIOCGETD", Const, 0}, + {"TIOCGFLAGS", Const, 1}, + {"TIOCGICOUNT", Const, 0}, + {"TIOCGLCKTRMIOS", Const, 0}, + {"TIOCGLINED", Const, 1}, + {"TIOCGPGRP", Const, 0}, + {"TIOCGPTN", Const, 0}, + {"TIOCGQSIZE", Const, 1}, + {"TIOCGRANTPT", Const, 1}, + {"TIOCGRS485", Const, 0}, + {"TIOCGSERIAL", Const, 0}, + {"TIOCGSID", Const, 0}, + {"TIOCGSIZE", Const, 1}, + {"TIOCGSOFTCAR", Const, 0}, + {"TIOCGTSTAMP", Const, 1}, + {"TIOCGWINSZ", Const, 0}, + {"TIOCINQ", Const, 0}, + {"TIOCIXOFF", Const, 0}, + {"TIOCIXON", Const, 0}, + {"TIOCLINUX", Const, 0}, + {"TIOCMBIC", Const, 0}, + {"TIOCMBIS", Const, 0}, + {"TIOCMGDTRWAIT", Const, 0}, + {"TIOCMGET", Const, 0}, + {"TIOCMIWAIT", Const, 0}, + {"TIOCMODG", Const, 0}, + {"TIOCMODS", Const, 0}, + {"TIOCMSDTRWAIT", Const, 0}, + {"TIOCMSET", Const, 0}, + {"TIOCM_CAR", Const, 0}, + {"TIOCM_CD", Const, 0}, + {"TIOCM_CTS", Const, 0}, + {"TIOCM_DCD", Const, 0}, + {"TIOCM_DSR", Const, 0}, + {"TIOCM_DTR", Const, 0}, + {"TIOCM_LE", Const, 0}, + {"TIOCM_RI", Const, 0}, + {"TIOCM_RNG", Const, 0}, + {"TIOCM_RTS", Const, 0}, + {"TIOCM_SR", Const, 0}, + {"TIOCM_ST", Const, 0}, + {"TIOCNOTTY", Const, 0}, + {"TIOCNXCL", Const, 0}, + {"TIOCOUTQ", Const, 0}, + {"TIOCPKT", Const, 0}, + {"TIOCPKT_DATA", Const, 0}, + {"TIOCPKT_DOSTOP", Const, 0}, + {"TIOCPKT_FLUSHREAD", Const, 0}, + {"TIOCPKT_FLUSHWRITE", Const, 0}, + {"TIOCPKT_IOCTL", Const, 0}, + {"TIOCPKT_NOSTOP", Const, 0}, + {"TIOCPKT_START", Const, 0}, + {"TIOCPKT_STOP", Const, 0}, + {"TIOCPTMASTER", Const, 0}, + {"TIOCPTMGET", Const, 1}, + {"TIOCPTSNAME", Const, 1}, + {"TIOCPTYGNAME", Const, 0}, + {"TIOCPTYGRANT", Const, 0}, + {"TIOCPTYUNLK", Const, 0}, + {"TIOCRCVFRAME", Const, 1}, + {"TIOCREMOTE", Const, 0}, + {"TIOCSBRK", Const, 0}, + {"TIOCSCONS", Const, 0}, + {"TIOCSCTTY", Const, 0}, + {"TIOCSDRAINWAIT", Const, 0}, + {"TIOCSDTR", Const, 0}, + {"TIOCSERCONFIG", Const, 0}, + {"TIOCSERGETLSR", Const, 0}, + {"TIOCSERGETMULTI", Const, 0}, + {"TIOCSERGSTRUCT", Const, 0}, + {"TIOCSERGWILD", Const, 0}, + {"TIOCSERSETMULTI", Const, 0}, + {"TIOCSERSWILD", Const, 0}, + {"TIOCSER_TEMT", Const, 0}, + {"TIOCSETA", Const, 0}, + 
{"TIOCSETAF", Const, 0}, + {"TIOCSETAW", Const, 0}, + {"TIOCSETD", Const, 0}, + {"TIOCSFLAGS", Const, 1}, + {"TIOCSIG", Const, 0}, + {"TIOCSLCKTRMIOS", Const, 0}, + {"TIOCSLINED", Const, 1}, + {"TIOCSPGRP", Const, 0}, + {"TIOCSPTLCK", Const, 0}, + {"TIOCSQSIZE", Const, 1}, + {"TIOCSRS485", Const, 0}, + {"TIOCSSERIAL", Const, 0}, + {"TIOCSSIZE", Const, 1}, + {"TIOCSSOFTCAR", Const, 0}, + {"TIOCSTART", Const, 0}, + {"TIOCSTAT", Const, 0}, + {"TIOCSTI", Const, 0}, + {"TIOCSTOP", Const, 0}, + {"TIOCSTSTAMP", Const, 1}, + {"TIOCSWINSZ", Const, 0}, + {"TIOCTIMESTAMP", Const, 0}, + {"TIOCUCNTL", Const, 0}, + {"TIOCVHANGUP", Const, 0}, + {"TIOCXMTFRAME", Const, 1}, + {"TOKEN_ADJUST_DEFAULT", Const, 0}, + {"TOKEN_ADJUST_GROUPS", Const, 0}, + {"TOKEN_ADJUST_PRIVILEGES", Const, 0}, + {"TOKEN_ADJUST_SESSIONID", Const, 11}, + {"TOKEN_ALL_ACCESS", Const, 0}, + {"TOKEN_ASSIGN_PRIMARY", Const, 0}, + {"TOKEN_DUPLICATE", Const, 0}, + {"TOKEN_EXECUTE", Const, 0}, + {"TOKEN_IMPERSONATE", Const, 0}, + {"TOKEN_QUERY", Const, 0}, + {"TOKEN_QUERY_SOURCE", Const, 0}, + {"TOKEN_READ", Const, 0}, + {"TOKEN_WRITE", Const, 0}, + {"TOSTOP", Const, 0}, + {"TRUNCATE_EXISTING", Const, 0}, + {"TUNATTACHFILTER", Const, 0}, + {"TUNDETACHFILTER", Const, 0}, + {"TUNGETFEATURES", Const, 0}, + {"TUNGETIFF", Const, 0}, + {"TUNGETSNDBUF", Const, 0}, + {"TUNGETVNETHDRSZ", Const, 0}, + {"TUNSETDEBUG", Const, 0}, + {"TUNSETGROUP", Const, 0}, + {"TUNSETIFF", Const, 0}, + {"TUNSETLINK", Const, 0}, + {"TUNSETNOCSUM", Const, 0}, + {"TUNSETOFFLOAD", Const, 0}, + {"TUNSETOWNER", Const, 0}, + {"TUNSETPERSIST", Const, 0}, + {"TUNSETSNDBUF", Const, 0}, + {"TUNSETTXFILTER", Const, 0}, + {"TUNSETVNETHDRSZ", Const, 0}, + {"Tee", Func, 0}, + {"TerminateProcess", Func, 0}, + {"Termios", Type, 0}, + {"Termios.Cc", Field, 0}, + {"Termios.Cflag", Field, 0}, + {"Termios.Iflag", Field, 0}, + {"Termios.Ispeed", Field, 0}, + {"Termios.Lflag", Field, 0}, + {"Termios.Line", Field, 0}, + {"Termios.Oflag", Field, 0}, + {"Termios.Ospeed", Field, 0}, + {"Termios.Pad_cgo_0", Field, 0}, + {"Tgkill", Func, 0}, + {"Time", Func, 0}, + {"Time_t", Type, 0}, + {"Times", Func, 0}, + {"Timespec", Type, 0}, + {"Timespec.Nsec", Field, 0}, + {"Timespec.Pad_cgo_0", Field, 2}, + {"Timespec.Sec", Field, 0}, + {"TimespecToNsec", Func, 0}, + {"Timeval", Type, 0}, + {"Timeval.Pad_cgo_0", Field, 0}, + {"Timeval.Sec", Field, 0}, + {"Timeval.Usec", Field, 0}, + {"Timeval32", Type, 0}, + {"Timeval32.Sec", Field, 0}, + {"Timeval32.Usec", Field, 0}, + {"TimevalToNsec", Func, 0}, + {"Timex", Type, 0}, + {"Timex.Calcnt", Field, 0}, + {"Timex.Constant", Field, 0}, + {"Timex.Errcnt", Field, 0}, + {"Timex.Esterror", Field, 0}, + {"Timex.Freq", Field, 0}, + {"Timex.Jitcnt", Field, 0}, + {"Timex.Jitter", Field, 0}, + {"Timex.Maxerror", Field, 0}, + {"Timex.Modes", Field, 0}, + {"Timex.Offset", Field, 0}, + {"Timex.Pad_cgo_0", Field, 0}, + {"Timex.Pad_cgo_1", Field, 0}, + {"Timex.Pad_cgo_2", Field, 0}, + {"Timex.Pad_cgo_3", Field, 0}, + {"Timex.Ppsfreq", Field, 0}, + {"Timex.Precision", Field, 0}, + {"Timex.Shift", Field, 0}, + {"Timex.Stabil", Field, 0}, + {"Timex.Status", Field, 0}, + {"Timex.Stbcnt", Field, 0}, + {"Timex.Tai", Field, 0}, + {"Timex.Tick", Field, 0}, + {"Timex.Time", Field, 0}, + {"Timex.Tolerance", Field, 0}, + {"Timezoneinformation", Type, 0}, + {"Timezoneinformation.Bias", Field, 0}, + {"Timezoneinformation.DaylightBias", Field, 0}, + {"Timezoneinformation.DaylightDate", Field, 0}, + {"Timezoneinformation.DaylightName", Field, 0}, + {"Timezoneinformation.StandardBias", 
Field, 0}, + {"Timezoneinformation.StandardDate", Field, 0}, + {"Timezoneinformation.StandardName", Field, 0}, + {"Tms", Type, 0}, + {"Tms.Cstime", Field, 0}, + {"Tms.Cutime", Field, 0}, + {"Tms.Stime", Field, 0}, + {"Tms.Utime", Field, 0}, + {"Token", Type, 0}, + {"TokenAccessInformation", Const, 0}, + {"TokenAuditPolicy", Const, 0}, + {"TokenDefaultDacl", Const, 0}, + {"TokenElevation", Const, 0}, + {"TokenElevationType", Const, 0}, + {"TokenGroups", Const, 0}, + {"TokenGroupsAndPrivileges", Const, 0}, + {"TokenHasRestrictions", Const, 0}, + {"TokenImpersonationLevel", Const, 0}, + {"TokenIntegrityLevel", Const, 0}, + {"TokenLinkedToken", Const, 0}, + {"TokenLogonSid", Const, 0}, + {"TokenMandatoryPolicy", Const, 0}, + {"TokenOrigin", Const, 0}, + {"TokenOwner", Const, 0}, + {"TokenPrimaryGroup", Const, 0}, + {"TokenPrivileges", Const, 0}, + {"TokenRestrictedSids", Const, 0}, + {"TokenSandBoxInert", Const, 0}, + {"TokenSessionId", Const, 0}, + {"TokenSessionReference", Const, 0}, + {"TokenSource", Const, 0}, + {"TokenStatistics", Const, 0}, + {"TokenType", Const, 0}, + {"TokenUIAccess", Const, 0}, + {"TokenUser", Const, 0}, + {"TokenVirtualizationAllowed", Const, 0}, + {"TokenVirtualizationEnabled", Const, 0}, + {"Tokenprimarygroup", Type, 0}, + {"Tokenprimarygroup.PrimaryGroup", Field, 0}, + {"Tokenuser", Type, 0}, + {"Tokenuser.User", Field, 0}, + {"TranslateAccountName", Func, 0}, + {"TranslateName", Func, 0}, + {"TransmitFile", Func, 0}, + {"TransmitFileBuffers", Type, 0}, + {"TransmitFileBuffers.Head", Field, 0}, + {"TransmitFileBuffers.HeadLength", Field, 0}, + {"TransmitFileBuffers.Tail", Field, 0}, + {"TransmitFileBuffers.TailLength", Field, 0}, + {"Truncate", Func, 0}, + {"UNIX_PATH_MAX", Const, 12}, + {"USAGE_MATCH_TYPE_AND", Const, 0}, + {"USAGE_MATCH_TYPE_OR", Const, 0}, + {"UTF16FromString", Func, 1}, + {"UTF16PtrFromString", Func, 1}, + {"UTF16ToString", Func, 0}, + {"Ucred", Type, 0}, + {"Ucred.Gid", Field, 0}, + {"Ucred.Pid", Field, 0}, + {"Ucred.Uid", Field, 0}, + {"Umask", Func, 0}, + {"Uname", Func, 0}, + {"Undelete", Func, 0}, + {"UnixCredentials", Func, 0}, + {"UnixRights", Func, 0}, + {"Unlink", Func, 0}, + {"Unlinkat", Func, 0}, + {"UnmapViewOfFile", Func, 0}, + {"Unmount", Func, 0}, + {"Unsetenv", Func, 4}, + {"Unshare", Func, 0}, + {"UserInfo10", Type, 0}, + {"UserInfo10.Comment", Field, 0}, + {"UserInfo10.FullName", Field, 0}, + {"UserInfo10.Name", Field, 0}, + {"UserInfo10.UsrComment", Field, 0}, + {"Ustat", Func, 0}, + {"Ustat_t", Type, 0}, + {"Ustat_t.Fname", Field, 0}, + {"Ustat_t.Fpack", Field, 0}, + {"Ustat_t.Pad_cgo_0", Field, 0}, + {"Ustat_t.Pad_cgo_1", Field, 0}, + {"Ustat_t.Tfree", Field, 0}, + {"Ustat_t.Tinode", Field, 0}, + {"Utimbuf", Type, 0}, + {"Utimbuf.Actime", Field, 0}, + {"Utimbuf.Modtime", Field, 0}, + {"Utime", Func, 0}, + {"Utimes", Func, 0}, + {"UtimesNano", Func, 1}, + {"Utsname", Type, 0}, + {"Utsname.Domainname", Field, 0}, + {"Utsname.Machine", Field, 0}, + {"Utsname.Nodename", Field, 0}, + {"Utsname.Release", Field, 0}, + {"Utsname.Sysname", Field, 0}, + {"Utsname.Version", Field, 0}, + {"VDISCARD", Const, 0}, + {"VDSUSP", Const, 1}, + {"VEOF", Const, 0}, + {"VEOL", Const, 0}, + {"VEOL2", Const, 0}, + {"VERASE", Const, 0}, + {"VERASE2", Const, 1}, + {"VINTR", Const, 0}, + {"VKILL", Const, 0}, + {"VLNEXT", Const, 0}, + {"VMIN", Const, 0}, + {"VQUIT", Const, 0}, + {"VREPRINT", Const, 0}, + {"VSTART", Const, 0}, + {"VSTATUS", Const, 1}, + {"VSTOP", Const, 0}, + {"VSUSP", Const, 0}, + {"VSWTC", Const, 0}, + {"VT0", Const, 1}, + {"VT1", 
Const, 1}, + {"VTDLY", Const, 1}, + {"VTIME", Const, 0}, + {"VWERASE", Const, 0}, + {"VirtualLock", Func, 0}, + {"VirtualUnlock", Func, 0}, + {"WAIT_ABANDONED", Const, 0}, + {"WAIT_FAILED", Const, 0}, + {"WAIT_OBJECT_0", Const, 0}, + {"WAIT_TIMEOUT", Const, 0}, + {"WALL", Const, 0}, + {"WALLSIG", Const, 1}, + {"WALTSIG", Const, 1}, + {"WCLONE", Const, 0}, + {"WCONTINUED", Const, 0}, + {"WCOREFLAG", Const, 0}, + {"WEXITED", Const, 0}, + {"WLINUXCLONE", Const, 0}, + {"WNOHANG", Const, 0}, + {"WNOTHREAD", Const, 0}, + {"WNOWAIT", Const, 0}, + {"WNOZOMBIE", Const, 1}, + {"WOPTSCHECKED", Const, 1}, + {"WORDSIZE", Const, 0}, + {"WSABuf", Type, 0}, + {"WSABuf.Buf", Field, 0}, + {"WSABuf.Len", Field, 0}, + {"WSACleanup", Func, 0}, + {"WSADESCRIPTION_LEN", Const, 0}, + {"WSAData", Type, 0}, + {"WSAData.Description", Field, 0}, + {"WSAData.HighVersion", Field, 0}, + {"WSAData.MaxSockets", Field, 0}, + {"WSAData.MaxUdpDg", Field, 0}, + {"WSAData.SystemStatus", Field, 0}, + {"WSAData.VendorInfo", Field, 0}, + {"WSAData.Version", Field, 0}, + {"WSAEACCES", Const, 2}, + {"WSAECONNABORTED", Const, 9}, + {"WSAECONNRESET", Const, 3}, + {"WSAEnumProtocols", Func, 2}, + {"WSAID_CONNECTEX", Var, 1}, + {"WSAIoctl", Func, 0}, + {"WSAPROTOCOL_LEN", Const, 2}, + {"WSAProtocolChain", Type, 2}, + {"WSAProtocolChain.ChainEntries", Field, 2}, + {"WSAProtocolChain.ChainLen", Field, 2}, + {"WSAProtocolInfo", Type, 2}, + {"WSAProtocolInfo.AddressFamily", Field, 2}, + {"WSAProtocolInfo.CatalogEntryId", Field, 2}, + {"WSAProtocolInfo.MaxSockAddr", Field, 2}, + {"WSAProtocolInfo.MessageSize", Field, 2}, + {"WSAProtocolInfo.MinSockAddr", Field, 2}, + {"WSAProtocolInfo.NetworkByteOrder", Field, 2}, + {"WSAProtocolInfo.Protocol", Field, 2}, + {"WSAProtocolInfo.ProtocolChain", Field, 2}, + {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2}, + {"WSAProtocolInfo.ProtocolName", Field, 2}, + {"WSAProtocolInfo.ProviderFlags", Field, 2}, + {"WSAProtocolInfo.ProviderId", Field, 2}, + {"WSAProtocolInfo.ProviderReserved", Field, 2}, + {"WSAProtocolInfo.SecurityScheme", Field, 2}, + {"WSAProtocolInfo.ServiceFlags1", Field, 2}, + {"WSAProtocolInfo.ServiceFlags2", Field, 2}, + {"WSAProtocolInfo.ServiceFlags3", Field, 2}, + {"WSAProtocolInfo.ServiceFlags4", Field, 2}, + {"WSAProtocolInfo.SocketType", Field, 2}, + {"WSAProtocolInfo.Version", Field, 2}, + {"WSARecv", Func, 0}, + {"WSARecvFrom", Func, 0}, + {"WSASYS_STATUS_LEN", Const, 0}, + {"WSASend", Func, 0}, + {"WSASendTo", Func, 0}, + {"WSASendto", Func, 0}, + {"WSAStartup", Func, 0}, + {"WSTOPPED", Const, 0}, + {"WTRAPPED", Const, 1}, + {"WUNTRACED", Const, 0}, + {"Wait4", Func, 0}, + {"WaitForSingleObject", Func, 0}, + {"WaitStatus", Type, 0}, + {"WaitStatus.ExitCode", Field, 0}, + {"Win32FileAttributeData", Type, 0}, + {"Win32FileAttributeData.CreationTime", Field, 0}, + {"Win32FileAttributeData.FileAttributes", Field, 0}, + {"Win32FileAttributeData.FileSizeHigh", Field, 0}, + {"Win32FileAttributeData.FileSizeLow", Field, 0}, + {"Win32FileAttributeData.LastAccessTime", Field, 0}, + {"Win32FileAttributeData.LastWriteTime", Field, 0}, + {"Win32finddata", Type, 0}, + {"Win32finddata.AlternateFileName", Field, 0}, + {"Win32finddata.CreationTime", Field, 0}, + {"Win32finddata.FileAttributes", Field, 0}, + {"Win32finddata.FileName", Field, 0}, + {"Win32finddata.FileSizeHigh", Field, 0}, + {"Win32finddata.FileSizeLow", Field, 0}, + {"Win32finddata.LastAccessTime", Field, 0}, + {"Win32finddata.LastWriteTime", Field, 0}, + {"Win32finddata.Reserved0", Field, 0}, + {"Win32finddata.Reserved1", 
Field, 0}, + {"Write", Func, 0}, + {"WriteConsole", Func, 1}, + {"WriteFile", Func, 0}, + {"X509_ASN_ENCODING", Const, 0}, + {"XCASE", Const, 0}, + {"XP1_CONNECTIONLESS", Const, 2}, + {"XP1_CONNECT_DATA", Const, 2}, + {"XP1_DISCONNECT_DATA", Const, 2}, + {"XP1_EXPEDITED_DATA", Const, 2}, + {"XP1_GRACEFUL_CLOSE", Const, 2}, + {"XP1_GUARANTEED_DELIVERY", Const, 2}, + {"XP1_GUARANTEED_ORDER", Const, 2}, + {"XP1_IFS_HANDLES", Const, 2}, + {"XP1_MESSAGE_ORIENTED", Const, 2}, + {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2}, + {"XP1_MULTIPOINT_DATA_PLANE", Const, 2}, + {"XP1_PARTIAL_MESSAGE", Const, 2}, + {"XP1_PSEUDO_STREAM", Const, 2}, + {"XP1_QOS_SUPPORTED", Const, 2}, + {"XP1_SAN_SUPPORT_SDP", Const, 2}, + {"XP1_SUPPORT_BROADCAST", Const, 2}, + {"XP1_SUPPORT_MULTIPOINT", Const, 2}, + {"XP1_UNI_RECV", Const, 2}, + {"XP1_UNI_SEND", Const, 2}, + }, + "syscall/js": { + {"CopyBytesToGo", Func, 0}, + {"CopyBytesToJS", Func, 0}, + {"Error", Type, 0}, + {"Func", Type, 0}, + {"FuncOf", Func, 0}, + {"Global", Func, 0}, + {"Null", Func, 0}, + {"Type", Type, 0}, + {"TypeBoolean", Const, 0}, + {"TypeFunction", Const, 0}, + {"TypeNull", Const, 0}, + {"TypeNumber", Const, 0}, + {"TypeObject", Const, 0}, + {"TypeString", Const, 0}, + {"TypeSymbol", Const, 0}, + {"TypeUndefined", Const, 0}, + {"Undefined", Func, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueOf", Func, 0}, + }, + "testing": { + {"(*B).Cleanup", Method, 14}, + {"(*B).Elapsed", Method, 20}, + {"(*B).Error", Method, 0}, + {"(*B).Errorf", Method, 0}, + {"(*B).Fail", Method, 0}, + {"(*B).FailNow", Method, 0}, + {"(*B).Failed", Method, 0}, + {"(*B).Fatal", Method, 0}, + {"(*B).Fatalf", Method, 0}, + {"(*B).Helper", Method, 9}, + {"(*B).Log", Method, 0}, + {"(*B).Logf", Method, 0}, + {"(*B).Name", Method, 8}, + {"(*B).ReportAllocs", Method, 1}, + {"(*B).ReportMetric", Method, 13}, + {"(*B).ResetTimer", Method, 0}, + {"(*B).Run", Method, 7}, + {"(*B).RunParallel", Method, 3}, + {"(*B).SetBytes", Method, 0}, + {"(*B).SetParallelism", Method, 3}, + {"(*B).Setenv", Method, 17}, + {"(*B).Skip", Method, 1}, + {"(*B).SkipNow", Method, 1}, + {"(*B).Skipf", Method, 1}, + {"(*B).Skipped", Method, 1}, + {"(*B).StartTimer", Method, 0}, + {"(*B).StopTimer", Method, 0}, + {"(*B).TempDir", Method, 15}, + {"(*F).Add", Method, 18}, + {"(*F).Cleanup", Method, 18}, + {"(*F).Error", Method, 18}, + {"(*F).Errorf", Method, 18}, + {"(*F).Fail", Method, 18}, + {"(*F).FailNow", Method, 18}, + {"(*F).Failed", Method, 18}, + {"(*F).Fatal", Method, 18}, + {"(*F).Fatalf", Method, 18}, + {"(*F).Fuzz", Method, 18}, + {"(*F).Helper", Method, 18}, + {"(*F).Log", Method, 18}, + {"(*F).Logf", Method, 18}, + {"(*F).Name", Method, 18}, + {"(*F).Setenv", Method, 18}, + {"(*F).Skip", Method, 18}, + {"(*F).SkipNow", Method, 18}, + {"(*F).Skipf", Method, 18}, + {"(*F).Skipped", Method, 18}, + {"(*F).TempDir", Method, 18}, + {"(*M).Run", Method, 4}, + {"(*PB).Next", Method, 3}, + {"(*T).Cleanup", Method, 14}, + {"(*T).Deadline", Method, 15}, + {"(*T).Error", Method, 0}, + {"(*T).Errorf", Method, 0}, + {"(*T).Fail", Method, 0}, + {"(*T).FailNow", Method, 0}, + {"(*T).Failed", Method, 0}, + {"(*T).Fatal", Method, 0}, + {"(*T).Fatalf", Method, 0}, + {"(*T).Helper", Method, 9}, + {"(*T).Log", Method, 0}, + {"(*T).Logf", Method, 0}, + {"(*T).Name", Method, 8}, + {"(*T).Parallel", Method, 0}, + {"(*T).Run", Method, 7}, + {"(*T).Setenv", Method, 17}, + {"(*T).Skip", Method, 1}, + {"(*T).SkipNow", Method, 1}, + {"(*T).Skipf", Method, 1}, + {"(*T).Skipped", Method, 1}, + {"(*T).TempDir", 
Method, 15}, + {"(BenchmarkResult).AllocedBytesPerOp", Method, 1}, + {"(BenchmarkResult).AllocsPerOp", Method, 1}, + {"(BenchmarkResult).MemString", Method, 1}, + {"(BenchmarkResult).NsPerOp", Method, 0}, + {"(BenchmarkResult).String", Method, 0}, + {"AllocsPerRun", Func, 1}, + {"B", Type, 0}, + {"B.N", Field, 0}, + {"Benchmark", Func, 0}, + {"BenchmarkResult", Type, 0}, + {"BenchmarkResult.Bytes", Field, 0}, + {"BenchmarkResult.Extra", Field, 13}, + {"BenchmarkResult.MemAllocs", Field, 1}, + {"BenchmarkResult.MemBytes", Field, 1}, + {"BenchmarkResult.N", Field, 0}, + {"BenchmarkResult.T", Field, 0}, + {"Cover", Type, 2}, + {"Cover.Blocks", Field, 2}, + {"Cover.Counters", Field, 2}, + {"Cover.CoveredPackages", Field, 2}, + {"Cover.Mode", Field, 2}, + {"CoverBlock", Type, 2}, + {"CoverBlock.Col0", Field, 2}, + {"CoverBlock.Col1", Field, 2}, + {"CoverBlock.Line0", Field, 2}, + {"CoverBlock.Line1", Field, 2}, + {"CoverBlock.Stmts", Field, 2}, + {"CoverMode", Func, 8}, + {"Coverage", Func, 4}, + {"F", Type, 18}, + {"Init", Func, 13}, + {"InternalBenchmark", Type, 0}, + {"InternalBenchmark.F", Field, 0}, + {"InternalBenchmark.Name", Field, 0}, + {"InternalExample", Type, 0}, + {"InternalExample.F", Field, 0}, + {"InternalExample.Name", Field, 0}, + {"InternalExample.Output", Field, 0}, + {"InternalExample.Unordered", Field, 7}, + {"InternalFuzzTarget", Type, 18}, + {"InternalFuzzTarget.Fn", Field, 18}, + {"InternalFuzzTarget.Name", Field, 18}, + {"InternalTest", Type, 0}, + {"InternalTest.F", Field, 0}, + {"InternalTest.Name", Field, 0}, + {"M", Type, 4}, + {"Main", Func, 0}, + {"MainStart", Func, 4}, + {"PB", Type, 3}, + {"RegisterCover", Func, 2}, + {"RunBenchmarks", Func, 0}, + {"RunExamples", Func, 0}, + {"RunTests", Func, 0}, + {"Short", Func, 0}, + {"T", Type, 0}, + {"TB", Type, 2}, + {"Testing", Func, 21}, + {"Verbose", Func, 1}, + }, + "testing/fstest": { + {"(MapFS).Glob", Method, 16}, + {"(MapFS).Open", Method, 16}, + {"(MapFS).ReadDir", Method, 16}, + {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).Stat", Method, 16}, + {"(MapFS).Sub", Method, 16}, + {"MapFS", Type, 16}, + {"MapFile", Type, 16}, + {"MapFile.Data", Field, 16}, + {"MapFile.ModTime", Field, 16}, + {"MapFile.Mode", Field, 16}, + {"MapFile.Sys", Field, 16}, + {"TestFS", Func, 16}, + }, + "testing/iotest": { + {"DataErrReader", Func, 0}, + {"ErrReader", Func, 16}, + {"ErrTimeout", Var, 0}, + {"HalfReader", Func, 0}, + {"NewReadLogger", Func, 0}, + {"NewWriteLogger", Func, 0}, + {"OneByteReader", Func, 0}, + {"TestReader", Func, 16}, + {"TimeoutReader", Func, 0}, + {"TruncateWriter", Func, 0}, + }, + "testing/quick": { + {"(*CheckEqualError).Error", Method, 0}, + {"(*CheckError).Error", Method, 0}, + {"(SetupError).Error", Method, 0}, + {"Check", Func, 0}, + {"CheckEqual", Func, 0}, + {"CheckEqualError", Type, 0}, + {"CheckEqualError.CheckError", Field, 0}, + {"CheckEqualError.Out1", Field, 0}, + {"CheckEqualError.Out2", Field, 0}, + {"CheckError", Type, 0}, + {"CheckError.Count", Field, 0}, + {"CheckError.In", Field, 0}, + {"Config", Type, 0}, + {"Config.MaxCount", Field, 0}, + {"Config.MaxCountScale", Field, 0}, + {"Config.Rand", Field, 0}, + {"Config.Values", Field, 0}, + {"Generator", Type, 0}, + {"SetupError", Type, 0}, + {"Value", Func, 0}, + }, + "testing/slogtest": { + {"Run", Func, 22}, + {"TestHandler", Func, 21}, + }, + "text/scanner": { + {"(*Position).IsValid", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).IsValid", Method, 0}, + {"(*Scanner).Next", Method, 0}, + {"(*Scanner).Peek", Method, 0}, + 
{"(*Scanner).Pos", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(*Scanner).TokenText", Method, 0}, + {"(Position).String", Method, 0}, + {"(Scanner).String", Method, 0}, + {"Char", Const, 0}, + {"Comment", Const, 0}, + {"EOF", Const, 0}, + {"Float", Const, 0}, + {"GoTokens", Const, 0}, + {"GoWhitespace", Const, 0}, + {"Ident", Const, 0}, + {"Int", Const, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"RawString", Const, 0}, + {"ScanChars", Const, 0}, + {"ScanComments", Const, 0}, + {"ScanFloats", Const, 0}, + {"ScanIdents", Const, 0}, + {"ScanInts", Const, 0}, + {"ScanRawStrings", Const, 0}, + {"ScanStrings", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.Error", Field, 0}, + {"Scanner.ErrorCount", Field, 0}, + {"Scanner.IsIdentRune", Field, 4}, + {"Scanner.Mode", Field, 0}, + {"Scanner.Position", Field, 0}, + {"Scanner.Whitespace", Field, 0}, + {"SkipComments", Const, 0}, + {"String", Const, 0}, + {"TokenString", Func, 0}, + }, + "text/tabwriter": { + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Init", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"AlignRight", Const, 0}, + {"Debug", Const, 0}, + {"DiscardEmptyColumns", Const, 0}, + {"Escape", Const, 0}, + {"FilterHTML", Const, 0}, + {"NewWriter", Func, 0}, + {"StripEscape", Const, 0}, + {"TabIndent", Const, 0}, + {"Writer", Type, 0}, + }, + "text/template": { + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 5}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"(ExecError).Error", Method, 6}, + {"(ExecError).Unwrap", Method, 13}, + {"(Template).Copy", Method, 2}, + {"(Template).ErrorContext", Method, 1}, + {"ExecError", Type, 6}, + {"ExecError.Err", Field, 6}, + {"ExecError.Name", Field, 6}, + {"FuncMap", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Template", Type, 0}, + {"Template.Tree", Field, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "text/template/parse": { + {"(*ActionNode).Copy", Method, 0}, + {"(*ActionNode).String", Method, 0}, + {"(*BoolNode).Copy", Method, 0}, + {"(*BoolNode).String", Method, 0}, + {"(*BranchNode).Copy", Method, 4}, + {"(*BranchNode).String", Method, 0}, + {"(*BreakNode).Copy", Method, 18}, + {"(*BreakNode).String", Method, 18}, + {"(*ChainNode).Add", Method, 1}, + {"(*ChainNode).Copy", Method, 1}, + {"(*ChainNode).String", Method, 1}, + {"(*CommandNode).Copy", Method, 0}, + {"(*CommandNode).String", Method, 0}, + {"(*CommentNode).Copy", Method, 16}, + {"(*CommentNode).String", Method, 16}, + {"(*ContinueNode).Copy", Method, 18}, + {"(*ContinueNode).String", Method, 18}, + {"(*DotNode).Copy", Method, 0}, + {"(*DotNode).String", Method, 0}, + {"(*DotNode).Type", Method, 0}, + {"(*FieldNode).Copy", 
Method, 0}, + {"(*FieldNode).String", Method, 0}, + {"(*IdentifierNode).Copy", Method, 0}, + {"(*IdentifierNode).SetPos", Method, 1}, + {"(*IdentifierNode).SetTree", Method, 4}, + {"(*IdentifierNode).String", Method, 0}, + {"(*IfNode).Copy", Method, 0}, + {"(*IfNode).String", Method, 0}, + {"(*ListNode).Copy", Method, 0}, + {"(*ListNode).CopyList", Method, 0}, + {"(*ListNode).String", Method, 0}, + {"(*NilNode).Copy", Method, 1}, + {"(*NilNode).String", Method, 1}, + {"(*NilNode).Type", Method, 1}, + {"(*NumberNode).Copy", Method, 0}, + {"(*NumberNode).String", Method, 0}, + {"(*PipeNode).Copy", Method, 0}, + {"(*PipeNode).CopyPipe", Method, 0}, + {"(*PipeNode).String", Method, 0}, + {"(*RangeNode).Copy", Method, 0}, + {"(*RangeNode).String", Method, 0}, + {"(*StringNode).Copy", Method, 0}, + {"(*StringNode).String", Method, 0}, + {"(*TemplateNode).Copy", Method, 0}, + {"(*TemplateNode).String", Method, 0}, + {"(*TextNode).Copy", Method, 0}, + {"(*TextNode).String", Method, 0}, + {"(*Tree).Copy", Method, 2}, + {"(*Tree).ErrorContext", Method, 1}, + {"(*Tree).Parse", Method, 0}, + {"(*VariableNode).Copy", Method, 0}, + {"(*VariableNode).String", Method, 0}, + {"(*WithNode).Copy", Method, 0}, + {"(*WithNode).String", Method, 0}, + {"(ActionNode).Position", Method, 1}, + {"(ActionNode).Type", Method, 0}, + {"(BoolNode).Position", Method, 1}, + {"(BoolNode).Type", Method, 0}, + {"(BranchNode).Position", Method, 1}, + {"(BranchNode).Type", Method, 0}, + {"(BreakNode).Position", Method, 18}, + {"(BreakNode).Type", Method, 18}, + {"(ChainNode).Position", Method, 1}, + {"(ChainNode).Type", Method, 1}, + {"(CommandNode).Position", Method, 1}, + {"(CommandNode).Type", Method, 0}, + {"(CommentNode).Position", Method, 16}, + {"(CommentNode).Type", Method, 16}, + {"(ContinueNode).Position", Method, 18}, + {"(ContinueNode).Type", Method, 18}, + {"(DotNode).Position", Method, 1}, + {"(FieldNode).Position", Method, 1}, + {"(FieldNode).Type", Method, 0}, + {"(IdentifierNode).Position", Method, 1}, + {"(IdentifierNode).Type", Method, 0}, + {"(IfNode).Position", Method, 1}, + {"(IfNode).Type", Method, 0}, + {"(ListNode).Position", Method, 1}, + {"(ListNode).Type", Method, 0}, + {"(NilNode).Position", Method, 1}, + {"(NodeType).Type", Method, 0}, + {"(NumberNode).Position", Method, 1}, + {"(NumberNode).Type", Method, 0}, + {"(PipeNode).Position", Method, 1}, + {"(PipeNode).Type", Method, 0}, + {"(Pos).Position", Method, 1}, + {"(RangeNode).Position", Method, 1}, + {"(RangeNode).Type", Method, 0}, + {"(StringNode).Position", Method, 1}, + {"(StringNode).Type", Method, 0}, + {"(TemplateNode).Position", Method, 1}, + {"(TemplateNode).Type", Method, 0}, + {"(TextNode).Position", Method, 1}, + {"(TextNode).Type", Method, 0}, + {"(VariableNode).Position", Method, 1}, + {"(VariableNode).Type", Method, 0}, + {"(WithNode).Position", Method, 1}, + {"(WithNode).Type", Method, 0}, + {"ActionNode", Type, 0}, + {"ActionNode.Line", Field, 0}, + {"ActionNode.NodeType", Field, 0}, + {"ActionNode.Pipe", Field, 0}, + {"ActionNode.Pos", Field, 1}, + {"BoolNode", Type, 0}, + {"BoolNode.NodeType", Field, 0}, + {"BoolNode.Pos", Field, 1}, + {"BoolNode.True", Field, 0}, + {"BranchNode", Type, 0}, + {"BranchNode.ElseList", Field, 0}, + {"BranchNode.Line", Field, 0}, + {"BranchNode.List", Field, 0}, + {"BranchNode.NodeType", Field, 0}, + {"BranchNode.Pipe", Field, 0}, + {"BranchNode.Pos", Field, 1}, + {"BreakNode", Type, 18}, + {"BreakNode.Line", Field, 18}, + {"BreakNode.NodeType", Field, 18}, + {"BreakNode.Pos", Field, 18}, + 
{"ChainNode", Type, 1}, + {"ChainNode.Field", Field, 1}, + {"ChainNode.Node", Field, 1}, + {"ChainNode.NodeType", Field, 1}, + {"ChainNode.Pos", Field, 1}, + {"CommandNode", Type, 0}, + {"CommandNode.Args", Field, 0}, + {"CommandNode.NodeType", Field, 0}, + {"CommandNode.Pos", Field, 1}, + {"CommentNode", Type, 16}, + {"CommentNode.NodeType", Field, 16}, + {"CommentNode.Pos", Field, 16}, + {"CommentNode.Text", Field, 16}, + {"ContinueNode", Type, 18}, + {"ContinueNode.Line", Field, 18}, + {"ContinueNode.NodeType", Field, 18}, + {"ContinueNode.Pos", Field, 18}, + {"DotNode", Type, 0}, + {"DotNode.NodeType", Field, 4}, + {"DotNode.Pos", Field, 1}, + {"FieldNode", Type, 0}, + {"FieldNode.Ident", Field, 0}, + {"FieldNode.NodeType", Field, 0}, + {"FieldNode.Pos", Field, 1}, + {"IdentifierNode", Type, 0}, + {"IdentifierNode.Ident", Field, 0}, + {"IdentifierNode.NodeType", Field, 0}, + {"IdentifierNode.Pos", Field, 1}, + {"IfNode", Type, 0}, + {"IfNode.BranchNode", Field, 0}, + {"IsEmptyTree", Func, 0}, + {"ListNode", Type, 0}, + {"ListNode.NodeType", Field, 0}, + {"ListNode.Nodes", Field, 0}, + {"ListNode.Pos", Field, 1}, + {"Mode", Type, 16}, + {"New", Func, 0}, + {"NewIdentifier", Func, 0}, + {"NilNode", Type, 1}, + {"NilNode.NodeType", Field, 4}, + {"NilNode.Pos", Field, 1}, + {"Node", Type, 0}, + {"NodeAction", Const, 0}, + {"NodeBool", Const, 0}, + {"NodeBreak", Const, 18}, + {"NodeChain", Const, 1}, + {"NodeCommand", Const, 0}, + {"NodeComment", Const, 16}, + {"NodeContinue", Const, 18}, + {"NodeDot", Const, 0}, + {"NodeField", Const, 0}, + {"NodeIdentifier", Const, 0}, + {"NodeIf", Const, 0}, + {"NodeList", Const, 0}, + {"NodeNil", Const, 1}, + {"NodeNumber", Const, 0}, + {"NodePipe", Const, 0}, + {"NodeRange", Const, 0}, + {"NodeString", Const, 0}, + {"NodeTemplate", Const, 0}, + {"NodeText", Const, 0}, + {"NodeType", Type, 0}, + {"NodeVariable", Const, 0}, + {"NodeWith", Const, 0}, + {"NumberNode", Type, 0}, + {"NumberNode.Complex128", Field, 0}, + {"NumberNode.Float64", Field, 0}, + {"NumberNode.Int64", Field, 0}, + {"NumberNode.IsComplex", Field, 0}, + {"NumberNode.IsFloat", Field, 0}, + {"NumberNode.IsInt", Field, 0}, + {"NumberNode.IsUint", Field, 0}, + {"NumberNode.NodeType", Field, 0}, + {"NumberNode.Pos", Field, 1}, + {"NumberNode.Text", Field, 0}, + {"NumberNode.Uint64", Field, 0}, + {"Parse", Func, 0}, + {"ParseComments", Const, 16}, + {"PipeNode", Type, 0}, + {"PipeNode.Cmds", Field, 0}, + {"PipeNode.Decl", Field, 0}, + {"PipeNode.IsAssign", Field, 11}, + {"PipeNode.Line", Field, 0}, + {"PipeNode.NodeType", Field, 0}, + {"PipeNode.Pos", Field, 1}, + {"Pos", Type, 1}, + {"RangeNode", Type, 0}, + {"RangeNode.BranchNode", Field, 0}, + {"SkipFuncCheck", Const, 17}, + {"StringNode", Type, 0}, + {"StringNode.NodeType", Field, 0}, + {"StringNode.Pos", Field, 1}, + {"StringNode.Quoted", Field, 0}, + {"StringNode.Text", Field, 0}, + {"TemplateNode", Type, 0}, + {"TemplateNode.Line", Field, 0}, + {"TemplateNode.Name", Field, 0}, + {"TemplateNode.NodeType", Field, 0}, + {"TemplateNode.Pipe", Field, 0}, + {"TemplateNode.Pos", Field, 1}, + {"TextNode", Type, 0}, + {"TextNode.NodeType", Field, 0}, + {"TextNode.Pos", Field, 1}, + {"TextNode.Text", Field, 0}, + {"Tree", Type, 0}, + {"Tree.Mode", Field, 16}, + {"Tree.Name", Field, 0}, + {"Tree.ParseName", Field, 1}, + {"Tree.Root", Field, 0}, + {"VariableNode", Type, 0}, + {"VariableNode.Ident", Field, 0}, + {"VariableNode.NodeType", Field, 0}, + {"VariableNode.Pos", Field, 1}, + {"WithNode", Type, 0}, + {"WithNode.BranchNode", Field, 0}, + }, 
+ "time": { + {"(*Location).String", Method, 0}, + {"(*ParseError).Error", Method, 0}, + {"(*Ticker).Reset", Method, 15}, + {"(*Ticker).Stop", Method, 0}, + {"(*Time).GobDecode", Method, 0}, + {"(*Time).UnmarshalBinary", Method, 2}, + {"(*Time).UnmarshalJSON", Method, 0}, + {"(*Time).UnmarshalText", Method, 2}, + {"(*Timer).Reset", Method, 1}, + {"(*Timer).Stop", Method, 0}, + {"(Duration).Abs", Method, 19}, + {"(Duration).Hours", Method, 0}, + {"(Duration).Microseconds", Method, 13}, + {"(Duration).Milliseconds", Method, 13}, + {"(Duration).Minutes", Method, 0}, + {"(Duration).Nanoseconds", Method, 0}, + {"(Duration).Round", Method, 9}, + {"(Duration).Seconds", Method, 0}, + {"(Duration).String", Method, 0}, + {"(Duration).Truncate", Method, 9}, + {"(Month).String", Method, 0}, + {"(Time).Add", Method, 0}, + {"(Time).AddDate", Method, 0}, + {"(Time).After", Method, 0}, + {"(Time).AppendFormat", Method, 5}, + {"(Time).Before", Method, 0}, + {"(Time).Clock", Method, 0}, + {"(Time).Compare", Method, 20}, + {"(Time).Date", Method, 0}, + {"(Time).Day", Method, 0}, + {"(Time).Equal", Method, 0}, + {"(Time).Format", Method, 0}, + {"(Time).GoString", Method, 17}, + {"(Time).GobEncode", Method, 0}, + {"(Time).Hour", Method, 0}, + {"(Time).ISOWeek", Method, 0}, + {"(Time).In", Method, 0}, + {"(Time).IsDST", Method, 17}, + {"(Time).IsZero", Method, 0}, + {"(Time).Local", Method, 0}, + {"(Time).Location", Method, 0}, + {"(Time).MarshalBinary", Method, 2}, + {"(Time).MarshalJSON", Method, 0}, + {"(Time).MarshalText", Method, 2}, + {"(Time).Minute", Method, 0}, + {"(Time).Month", Method, 0}, + {"(Time).Nanosecond", Method, 0}, + {"(Time).Round", Method, 1}, + {"(Time).Second", Method, 0}, + {"(Time).String", Method, 0}, + {"(Time).Sub", Method, 0}, + {"(Time).Truncate", Method, 1}, + {"(Time).UTC", Method, 0}, + {"(Time).Unix", Method, 0}, + {"(Time).UnixMicro", Method, 17}, + {"(Time).UnixMilli", Method, 17}, + {"(Time).UnixNano", Method, 0}, + {"(Time).Weekday", Method, 0}, + {"(Time).Year", Method, 0}, + {"(Time).YearDay", Method, 1}, + {"(Time).Zone", Method, 0}, + {"(Time).ZoneBounds", Method, 19}, + {"(Weekday).String", Method, 0}, + {"ANSIC", Const, 0}, + {"After", Func, 0}, + {"AfterFunc", Func, 0}, + {"April", Const, 0}, + {"August", Const, 0}, + {"Date", Func, 0}, + {"DateOnly", Const, 20}, + {"DateTime", Const, 20}, + {"December", Const, 0}, + {"Duration", Type, 0}, + {"February", Const, 0}, + {"FixedZone", Func, 0}, + {"Friday", Const, 0}, + {"Hour", Const, 0}, + {"January", Const, 0}, + {"July", Const, 0}, + {"June", Const, 0}, + {"Kitchen", Const, 0}, + {"Layout", Const, 17}, + {"LoadLocation", Func, 0}, + {"LoadLocationFromTZData", Func, 10}, + {"Local", Var, 0}, + {"Location", Type, 0}, + {"March", Const, 0}, + {"May", Const, 0}, + {"Microsecond", Const, 0}, + {"Millisecond", Const, 0}, + {"Minute", Const, 0}, + {"Monday", Const, 0}, + {"Month", Type, 0}, + {"Nanosecond", Const, 0}, + {"NewTicker", Func, 0}, + {"NewTimer", Func, 0}, + {"November", Const, 0}, + {"Now", Func, 0}, + {"October", Const, 0}, + {"Parse", Func, 0}, + {"ParseDuration", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Layout", Field, 0}, + {"ParseError.LayoutElem", Field, 0}, + {"ParseError.Message", Field, 0}, + {"ParseError.Value", Field, 0}, + {"ParseError.ValueElem", Field, 0}, + {"ParseInLocation", Func, 1}, + {"RFC1123", Const, 0}, + {"RFC1123Z", Const, 0}, + {"RFC3339", Const, 0}, + {"RFC3339Nano", Const, 0}, + {"RFC822", Const, 0}, + {"RFC822Z", Const, 0}, + {"RFC850", Const, 0}, + {"RubyDate", Const, 
0}, + {"Saturday", Const, 0}, + {"Second", Const, 0}, + {"September", Const, 0}, + {"Since", Func, 0}, + {"Sleep", Func, 0}, + {"Stamp", Const, 0}, + {"StampMicro", Const, 0}, + {"StampMilli", Const, 0}, + {"StampNano", Const, 0}, + {"Sunday", Const, 0}, + {"Thursday", Const, 0}, + {"Tick", Func, 0}, + {"Ticker", Type, 0}, + {"Ticker.C", Field, 0}, + {"Time", Type, 0}, + {"TimeOnly", Const, 20}, + {"Timer", Type, 0}, + {"Timer.C", Field, 0}, + {"Tuesday", Const, 0}, + {"UTC", Var, 0}, + {"Unix", Func, 0}, + {"UnixDate", Const, 0}, + {"UnixMicro", Func, 17}, + {"UnixMilli", Func, 17}, + {"Until", Func, 8}, + {"Wednesday", Const, 0}, + {"Weekday", Type, 0}, + }, + "unicode": { + {"(SpecialCase).ToLower", Method, 0}, + {"(SpecialCase).ToTitle", Method, 0}, + {"(SpecialCase).ToUpper", Method, 0}, + {"ASCII_Hex_Digit", Var, 0}, + {"Adlam", Var, 7}, + {"Ahom", Var, 5}, + {"Anatolian_Hieroglyphs", Var, 5}, + {"Arabic", Var, 0}, + {"Armenian", Var, 0}, + {"Avestan", Var, 0}, + {"AzeriCase", Var, 0}, + {"Balinese", Var, 0}, + {"Bamum", Var, 0}, + {"Bassa_Vah", Var, 4}, + {"Batak", Var, 0}, + {"Bengali", Var, 0}, + {"Bhaiksuki", Var, 7}, + {"Bidi_Control", Var, 0}, + {"Bopomofo", Var, 0}, + {"Brahmi", Var, 0}, + {"Braille", Var, 0}, + {"Buginese", Var, 0}, + {"Buhid", Var, 0}, + {"C", Var, 0}, + {"Canadian_Aboriginal", Var, 0}, + {"Carian", Var, 0}, + {"CaseRange", Type, 0}, + {"CaseRange.Delta", Field, 0}, + {"CaseRange.Hi", Field, 0}, + {"CaseRange.Lo", Field, 0}, + {"CaseRanges", Var, 0}, + {"Categories", Var, 0}, + {"Caucasian_Albanian", Var, 4}, + {"Cc", Var, 0}, + {"Cf", Var, 0}, + {"Chakma", Var, 1}, + {"Cham", Var, 0}, + {"Cherokee", Var, 0}, + {"Chorasmian", Var, 16}, + {"Co", Var, 0}, + {"Common", Var, 0}, + {"Coptic", Var, 0}, + {"Cs", Var, 0}, + {"Cuneiform", Var, 0}, + {"Cypriot", Var, 0}, + {"Cypro_Minoan", Var, 21}, + {"Cyrillic", Var, 0}, + {"Dash", Var, 0}, + {"Deprecated", Var, 0}, + {"Deseret", Var, 0}, + {"Devanagari", Var, 0}, + {"Diacritic", Var, 0}, + {"Digit", Var, 0}, + {"Dives_Akuru", Var, 16}, + {"Dogra", Var, 13}, + {"Duployan", Var, 4}, + {"Egyptian_Hieroglyphs", Var, 0}, + {"Elbasan", Var, 4}, + {"Elymaic", Var, 14}, + {"Ethiopic", Var, 0}, + {"Extender", Var, 0}, + {"FoldCategory", Var, 0}, + {"FoldScript", Var, 0}, + {"Georgian", Var, 0}, + {"Glagolitic", Var, 0}, + {"Gothic", Var, 0}, + {"Grantha", Var, 4}, + {"GraphicRanges", Var, 0}, + {"Greek", Var, 0}, + {"Gujarati", Var, 0}, + {"Gunjala_Gondi", Var, 13}, + {"Gurmukhi", Var, 0}, + {"Han", Var, 0}, + {"Hangul", Var, 0}, + {"Hanifi_Rohingya", Var, 13}, + {"Hanunoo", Var, 0}, + {"Hatran", Var, 5}, + {"Hebrew", Var, 0}, + {"Hex_Digit", Var, 0}, + {"Hiragana", Var, 0}, + {"Hyphen", Var, 0}, + {"IDS_Binary_Operator", Var, 0}, + {"IDS_Trinary_Operator", Var, 0}, + {"Ideographic", Var, 0}, + {"Imperial_Aramaic", Var, 0}, + {"In", Func, 2}, + {"Inherited", Var, 0}, + {"Inscriptional_Pahlavi", Var, 0}, + {"Inscriptional_Parthian", Var, 0}, + {"Is", Func, 0}, + {"IsControl", Func, 0}, + {"IsDigit", Func, 0}, + {"IsGraphic", Func, 0}, + {"IsLetter", Func, 0}, + {"IsLower", Func, 0}, + {"IsMark", Func, 0}, + {"IsNumber", Func, 0}, + {"IsOneOf", Func, 0}, + {"IsPrint", Func, 0}, + {"IsPunct", Func, 0}, + {"IsSpace", Func, 0}, + {"IsSymbol", Func, 0}, + {"IsTitle", Func, 0}, + {"IsUpper", Func, 0}, + {"Javanese", Var, 0}, + {"Join_Control", Var, 0}, + {"Kaithi", Var, 0}, + {"Kannada", Var, 0}, + {"Katakana", Var, 0}, + {"Kawi", Var, 21}, + {"Kayah_Li", Var, 0}, + {"Kharoshthi", Var, 0}, + {"Khitan_Small_Script", Var, 16}, + 
{"Khmer", Var, 0}, + {"Khojki", Var, 4}, + {"Khudawadi", Var, 4}, + {"L", Var, 0}, + {"Lao", Var, 0}, + {"Latin", Var, 0}, + {"Lepcha", Var, 0}, + {"Letter", Var, 0}, + {"Limbu", Var, 0}, + {"Linear_A", Var, 4}, + {"Linear_B", Var, 0}, + {"Lisu", Var, 0}, + {"Ll", Var, 0}, + {"Lm", Var, 0}, + {"Lo", Var, 0}, + {"Logical_Order_Exception", Var, 0}, + {"Lower", Var, 0}, + {"LowerCase", Const, 0}, + {"Lt", Var, 0}, + {"Lu", Var, 0}, + {"Lycian", Var, 0}, + {"Lydian", Var, 0}, + {"M", Var, 0}, + {"Mahajani", Var, 4}, + {"Makasar", Var, 13}, + {"Malayalam", Var, 0}, + {"Mandaic", Var, 0}, + {"Manichaean", Var, 4}, + {"Marchen", Var, 7}, + {"Mark", Var, 0}, + {"Masaram_Gondi", Var, 10}, + {"MaxASCII", Const, 0}, + {"MaxCase", Const, 0}, + {"MaxLatin1", Const, 0}, + {"MaxRune", Const, 0}, + {"Mc", Var, 0}, + {"Me", Var, 0}, + {"Medefaidrin", Var, 13}, + {"Meetei_Mayek", Var, 0}, + {"Mende_Kikakui", Var, 4}, + {"Meroitic_Cursive", Var, 1}, + {"Meroitic_Hieroglyphs", Var, 1}, + {"Miao", Var, 1}, + {"Mn", Var, 0}, + {"Modi", Var, 4}, + {"Mongolian", Var, 0}, + {"Mro", Var, 4}, + {"Multani", Var, 5}, + {"Myanmar", Var, 0}, + {"N", Var, 0}, + {"Nabataean", Var, 4}, + {"Nag_Mundari", Var, 21}, + {"Nandinagari", Var, 14}, + {"Nd", Var, 0}, + {"New_Tai_Lue", Var, 0}, + {"Newa", Var, 7}, + {"Nko", Var, 0}, + {"Nl", Var, 0}, + {"No", Var, 0}, + {"Noncharacter_Code_Point", Var, 0}, + {"Number", Var, 0}, + {"Nushu", Var, 10}, + {"Nyiakeng_Puachue_Hmong", Var, 14}, + {"Ogham", Var, 0}, + {"Ol_Chiki", Var, 0}, + {"Old_Hungarian", Var, 5}, + {"Old_Italic", Var, 0}, + {"Old_North_Arabian", Var, 4}, + {"Old_Permic", Var, 4}, + {"Old_Persian", Var, 0}, + {"Old_Sogdian", Var, 13}, + {"Old_South_Arabian", Var, 0}, + {"Old_Turkic", Var, 0}, + {"Old_Uyghur", Var, 21}, + {"Oriya", Var, 0}, + {"Osage", Var, 7}, + {"Osmanya", Var, 0}, + {"Other", Var, 0}, + {"Other_Alphabetic", Var, 0}, + {"Other_Default_Ignorable_Code_Point", Var, 0}, + {"Other_Grapheme_Extend", Var, 0}, + {"Other_ID_Continue", Var, 0}, + {"Other_ID_Start", Var, 0}, + {"Other_Lowercase", Var, 0}, + {"Other_Math", Var, 0}, + {"Other_Uppercase", Var, 0}, + {"P", Var, 0}, + {"Pahawh_Hmong", Var, 4}, + {"Palmyrene", Var, 4}, + {"Pattern_Syntax", Var, 0}, + {"Pattern_White_Space", Var, 0}, + {"Pau_Cin_Hau", Var, 4}, + {"Pc", Var, 0}, + {"Pd", Var, 0}, + {"Pe", Var, 0}, + {"Pf", Var, 0}, + {"Phags_Pa", Var, 0}, + {"Phoenician", Var, 0}, + {"Pi", Var, 0}, + {"Po", Var, 0}, + {"Prepended_Concatenation_Mark", Var, 7}, + {"PrintRanges", Var, 0}, + {"Properties", Var, 0}, + {"Ps", Var, 0}, + {"Psalter_Pahlavi", Var, 4}, + {"Punct", Var, 0}, + {"Quotation_Mark", Var, 0}, + {"Radical", Var, 0}, + {"Range16", Type, 0}, + {"Range16.Hi", Field, 0}, + {"Range16.Lo", Field, 0}, + {"Range16.Stride", Field, 0}, + {"Range32", Type, 0}, + {"Range32.Hi", Field, 0}, + {"Range32.Lo", Field, 0}, + {"Range32.Stride", Field, 0}, + {"RangeTable", Type, 0}, + {"RangeTable.LatinOffset", Field, 1}, + {"RangeTable.R16", Field, 0}, + {"RangeTable.R32", Field, 0}, + {"Regional_Indicator", Var, 10}, + {"Rejang", Var, 0}, + {"ReplacementChar", Const, 0}, + {"Runic", Var, 0}, + {"S", Var, 0}, + {"STerm", Var, 0}, + {"Samaritan", Var, 0}, + {"Saurashtra", Var, 0}, + {"Sc", Var, 0}, + {"Scripts", Var, 0}, + {"Sentence_Terminal", Var, 7}, + {"Sharada", Var, 1}, + {"Shavian", Var, 0}, + {"Siddham", Var, 4}, + {"SignWriting", Var, 5}, + {"SimpleFold", Func, 0}, + {"Sinhala", Var, 0}, + {"Sk", Var, 0}, + {"Sm", Var, 0}, + {"So", Var, 0}, + {"Soft_Dotted", Var, 0}, + {"Sogdian", Var, 13}, + 
{"Sora_Sompeng", Var, 1}, + {"Soyombo", Var, 10}, + {"Space", Var, 0}, + {"SpecialCase", Type, 0}, + {"Sundanese", Var, 0}, + {"Syloti_Nagri", Var, 0}, + {"Symbol", Var, 0}, + {"Syriac", Var, 0}, + {"Tagalog", Var, 0}, + {"Tagbanwa", Var, 0}, + {"Tai_Le", Var, 0}, + {"Tai_Tham", Var, 0}, + {"Tai_Viet", Var, 0}, + {"Takri", Var, 1}, + {"Tamil", Var, 0}, + {"Tangsa", Var, 21}, + {"Tangut", Var, 7}, + {"Telugu", Var, 0}, + {"Terminal_Punctuation", Var, 0}, + {"Thaana", Var, 0}, + {"Thai", Var, 0}, + {"Tibetan", Var, 0}, + {"Tifinagh", Var, 0}, + {"Tirhuta", Var, 4}, + {"Title", Var, 0}, + {"TitleCase", Const, 0}, + {"To", Func, 0}, + {"ToLower", Func, 0}, + {"ToTitle", Func, 0}, + {"ToUpper", Func, 0}, + {"Toto", Var, 21}, + {"TurkishCase", Var, 0}, + {"Ugaritic", Var, 0}, + {"Unified_Ideograph", Var, 0}, + {"Upper", Var, 0}, + {"UpperCase", Const, 0}, + {"UpperLower", Const, 0}, + {"Vai", Var, 0}, + {"Variation_Selector", Var, 0}, + {"Version", Const, 0}, + {"Vithkuqi", Var, 21}, + {"Wancho", Var, 14}, + {"Warang_Citi", Var, 4}, + {"White_Space", Var, 0}, + {"Yezidi", Var, 16}, + {"Yi", Var, 0}, + {"Z", Var, 0}, + {"Zanabazar_Square", Var, 10}, + {"Zl", Var, 0}, + {"Zp", Var, 0}, + {"Zs", Var, 0}, + }, + "unicode/utf16": { + {"AppendRune", Func, 20}, + {"Decode", Func, 0}, + {"DecodeRune", Func, 0}, + {"Encode", Func, 0}, + {"EncodeRune", Func, 0}, + {"IsSurrogate", Func, 0}, + }, + "unicode/utf8": { + {"AppendRune", Func, 18}, + {"DecodeLastRune", Func, 0}, + {"DecodeLastRuneInString", Func, 0}, + {"DecodeRune", Func, 0}, + {"DecodeRuneInString", Func, 0}, + {"EncodeRune", Func, 0}, + {"FullRune", Func, 0}, + {"FullRuneInString", Func, 0}, + {"MaxRune", Const, 0}, + {"RuneCount", Func, 0}, + {"RuneCountInString", Func, 0}, + {"RuneError", Const, 0}, + {"RuneLen", Func, 0}, + {"RuneSelf", Const, 0}, + {"RuneStart", Func, 0}, + {"UTFMax", Const, 0}, + {"Valid", Func, 0}, + {"ValidRune", Func, 1}, + {"ValidString", Func, 0}, + }, + "unsafe": { + {"Add", Func, 0}, + {"Alignof", Func, 0}, + {"Offsetof", Func, 0}, + {"Pointer", Type, 0}, + {"Sizeof", Func, 0}, + {"Slice", Func, 0}, + {"SliceData", Func, 0}, + {"String", Func, 0}, + {"StringData", Func, 0}, + }, +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go new file mode 100644 index 00000000..98904017 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -0,0 +1,97 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run generate.go + +// Package stdlib provides a table of all exported symbols in the +// standard library, along with the version at which they first +// appeared. +package stdlib + +import ( + "fmt" + "strings" +) + +type Symbol struct { + Name string + Kind Kind + Version Version // Go version that first included the symbol +} + +// A Kind indicates the kind of a symbol: +// function, variable, constant, type, and so on. +type Kind int8 + +const ( + Invalid Kind = iota // Example name: + Type // "Buffer" + Func // "Println" + Var // "EOF" + Const // "Pi" + Field // "Point.X" + Method // "(*Buffer).Grow" +) + +func (kind Kind) String() string { + return [...]string{ + Invalid: "invalid", + Type: "type", + Func: "func", + Var: "var", + Const: "const", + Field: "field", + Method: "method", + }[kind] +} + +// A Version represents a version of Go of the form "go1.%d". 
+type Version int8 + +// String returns a version string of the form "go1.23", without allocating. +func (v Version) String() string { return versions[v] } + +var versions [30]string // (increase constant as needed) + +func init() { + for i := range versions { + versions[i] = fmt.Sprintf("go1.%d", i) + } +} + +// HasPackage reports whether the specified package path is part of +// the standard library's public API. +func HasPackage(path string) bool { + _, ok := PackageSymbols[path] + return ok +} + +// SplitField splits the field symbol name into type and field +// components. It must be called only on Field symbols. +// +// Example: "File.Package" -> ("File", "Package") +func (sym *Symbol) SplitField() (typename, name string) { + if sym.Kind != Field { + panic("not a field") + } + typename, name, _ = strings.Cut(sym.Name, ".") + return +} + +// SplitMethod splits the method symbol name into pointer, receiver, +// and method components. It must be called only on Method symbols. +// +// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow") +func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) { + if sym.Kind != Method { + panic("not a method") + } + recv, name, _ = strings.Cut(sym.Name, ".") + recv = recv[len("(") : len(recv)-len(")")] + ptr = recv[0] == '*' + if ptr { + recv = recv[len("*"):] + } + return +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fa83a78a..50a2c552 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,7 +7,7 @@ 0xacab.org/leap/bitmask-core/pkg/introducer 0xacab.org/leap/bitmask-core/pkg/models 0xacab.org/leap/bitmask-core/pkg/storage -# 0xacab.org/leap/obfsvpn v1.1.0 +# 0xacab.org/leap/obfsvpn v1.3.1-0.20241121155258-e6b06efc4456 ## explicit; go 1.22 0xacab.org/leap/obfsvpn/client 0xacab.org/leap/obfsvpn/obfsvpn @@ -157,6 +157,12 @@ github.com/go-openapi/swag # github.com/go-openapi/validate v0.24.0 ## explicit; go 1.20 github.com/go-openapi/validate +# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 +## explicit; go 1.13 +github.com/go-task/slim-sprig +# github.com/google/pprof v0.0.0-20231212022811-ec68065c825e +## explicit; go 1.19 +github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid @@ -211,6 +217,24 @@ github.com/natefinch/npipe # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid +# github.com/onsi/ginkgo/v2 v2.13.2 +## explicit; go 1.18 +github.com/onsi/ginkgo/v2/config +github.com/onsi/ginkgo/v2/formatter +github.com/onsi/ginkgo/v2/ginkgo +github.com/onsi/ginkgo/v2/ginkgo/build +github.com/onsi/ginkgo/v2/ginkgo/command +github.com/onsi/ginkgo/v2/ginkgo/generators +github.com/onsi/ginkgo/v2/ginkgo/internal +github.com/onsi/ginkgo/v2/ginkgo/labels +github.com/onsi/ginkgo/v2/ginkgo/outline +github.com/onsi/ginkgo/v2/ginkgo/run +github.com/onsi/ginkgo/v2/ginkgo/unfocus +github.com/onsi/ginkgo/v2/ginkgo/watch +github.com/onsi/ginkgo/v2/internal/interrupt_handler +github.com/onsi/ginkgo/v2/internal/parallel_support +github.com/onsi/ginkgo/v2/reporters +github.com/onsi/ginkgo/v2/types # github.com/opentracing/opentracing-go v1.2.0 ## explicit; go 1.14 github.com/opentracing/opentracing-go @@ -320,6 +344,22 @@ github.com/pmezard/go-difflib/difflib # github.com/prometheus-community/pro-bing v0.4.0 ## explicit; go 1.19 github.com/prometheus-community/pro-bing +# github.com/quic-go/quic-go v0.47.0 +## explicit; go 1.22 +github.com/quic-go/quic-go +github.com/quic-go/quic-go/internal/ackhandler +github.com/quic-go/quic-go/internal/congestion 
+github.com/quic-go/quic-go/internal/flowcontrol +github.com/quic-go/quic-go/internal/handshake +github.com/quic-go/quic-go/internal/protocol +github.com/quic-go/quic-go/internal/qerr +github.com/quic-go/quic-go/internal/qtls +github.com/quic-go/quic-go/internal/utils +github.com/quic-go/quic-go/internal/utils/linkedlist +github.com/quic-go/quic-go/internal/utils/ringbuffer +github.com/quic-go/quic-go/internal/wire +github.com/quic-go/quic-go/logging +github.com/quic-go/quic-go/quicvarint # github.com/refraction-networking/utls v1.6.7 ## explicit; go 1.21 github.com/refraction-networking/utls @@ -424,6 +464,10 @@ go.opentelemetry.io/otel/metric/embedded ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded +# go.uber.org/mock v0.4.0 +## explicit; go 1.20 +go.uber.org/mock/mockgen +go.uber.org/mock/mockgen/model # golang.org/x/crypto v0.26.0 ## explicit; go 1.20 golang.org/x/crypto/blake2b @@ -447,6 +491,15 @@ golang.org/x/crypto/sha3 golang.org/x/crypto/tea golang.org/x/crypto/twofish golang.org/x/crypto/xtea +# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 +## explicit; go 1.20 +golang.org/x/exp/rand +# golang.org/x/mod v0.17.0 +## explicit; go 1.18 +golang.org/x/mod/internal/lazyregexp +golang.org/x/mod/modfile +golang.org/x/mod/module +golang.org/x/mod/semver # golang.org/x/net v0.28.0 ## explicit; go 1.18 golang.org/x/net/bpf @@ -481,8 +534,19 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# google.golang.org/protobuf v1.33.0 -## explicit; go 1.17 +# golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d +## explicit; go 1.19 +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/imports +golang.org/x/tools/internal/event +golang.org/x/tools/internal/event/core +golang.org/x/tools/internal/event/keys +golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gocommand +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/imports +golang.org/x/tools/internal/stdlib # gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce ## explicit # gopkg.in/yaml.v3 v3.0.1
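
Reviewer note: since the bulk of this change is generated and vendored code, here is a
minimal sketch of how the newly vendored golang.org/x/tools/internal/stdlib package
fits together. It is illustrative only: the package is internal to x/tools, so the
import below compiles only from within that module, and the main package wrapping it
is invented for the example. PackageSymbols (evidently a map[string][]Symbol, populated
by the generated manifest.go) is the table referenced by HasPackage above.

package main

import (
	"fmt"

	// Internal package: importable only from inside golang.org/x/tools.
	"golang.org/x/tools/internal/stdlib"
)

func main() {
	// Membership test against the standard library's public API.
	fmt.Println(stdlib.HasPackage("unicode/utf8")) // true

	// Each symbol records the Go release that introduced it; Version's
	// String method just indexes the precomputed "go1.%d" table.
	for _, sym := range stdlib.PackageSymbols["unicode/utf16"] {
		fmt.Printf("%s (%s) since %s\n", sym.Name, sym.Kind, sym.Version)
		// e.g. "AppendRune (func) since go1.20"
	}

	// Method symbols such as "(*Ticker).Reset" can be decomposed further.
	for _, sym := range stdlib.PackageSymbols["time"] {
		if sym.Kind == stdlib.Method {
			ptr, recv, name := sym.SplitMethod()
			fmt.Printf("ptr=%v recv=%s method=%s\n", ptr, recv, name)
			// e.g. "ptr=true recv=Ticker method=Reset"
		}
	}
}

Note that versions is a fixed [30]string, so the table currently covers go1.0 through
go1.29; the "(increase constant as needed)" comment is the upstream reminder to widen
it as new Go releases appear.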